Skip to content

Commit

Permalink
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'develop' into eval_splitters
Browse files Browse the repository at this point in the history
Signed-off-by: Bruna Junqueira Lopes <99977808+brunaafl@users.noreply.github.com>
brunaafl authored Sep 30, 2024
2 parents 26b13d5 + ca79b90 commit e6661c4
Showing 60 changed files with 1,227 additions and 3,795 deletions.
139 changes: 70 additions & 69 deletions .github/workflows/docs.yml
Original file line number Diff line number Diff line change
@@ -1,23 +1,27 @@
name: Docs

concurrency:
group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
cancel-in-progress: true


on:
push:
branches: [master, develop]
branches: [ master, develop ]
pull_request:
branches: [master, develop]
branches: [ master, develop ]
permissions:
contents: write
pages: write
id-token: write

jobs:
build_docs:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: true
matrix:
os: [ubuntu-latest]
python-version: ["3.9"]
os: [ ubuntu-latest ]
python-version: [ "3.9" ]

steps:
- uses: actions/checkout@v4
@@ -27,7 +31,7 @@ jobs:
mkdir ~/mne_data
- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -37,63 +41,79 @@ jobs:
virtualenvs-create: true
virtualenvs-in-project: true

- name: Cache datasets and docs
id: cached-dataset-docs
- name: Create/Restore MNE Data Cache
id: cache-mne_data
uses: actions/cache@v3
with:
path: ~/mne_data
key: ${{ runner.os }}-mne_data

- name: Cache docs build
id: cache-docs
uses: actions/cache@v3
with:
key: doc-${{ github.head_ref }}-${{ hashFiles('moabb/datasets/**') }}
path: |
~/mne_data
docs/build
key: docs-build-${{ github.run_id }}-${{ github.run_attempt }}
path: docs/build

- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v3
with:
path: .venv
key:
docsvenv-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/pyproject.toml') }}

- name: Install dependencies
if: steps.cached-dataset-docs.outputs.cache-hit != 'true'
run: poetry install --no-interaction --no-root --with docs --extras deeplearning
if: (steps.cached-poetry-dependencies.outputs.cache-hit != 'true')
run: poetry install --no-interaction --no-root --with docs --extras deeplearning --extras optuna

- name: Install library
run: poetry install --no-interaction --with docs --extras deeplearning
run: poetry install --no-interaction --with docs --extras deeplearning --extras optuna

- name: Build docs
run: |
cd docs && poetry run make html
# Create an artifact of the html output.
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v4
with:
name: DocumentationHTML
path: docs/build/html/

deploy_docs:
if: ${{ github.ref == 'refs/heads/master' }}
deploy_neurotechx:
if: ${{ github.ref == 'refs/heads/develop' }}
needs: build_docs
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
os: [ ubuntu-latest ]

steps:
- uses: actions/checkout@v4

- name: Create local data folder
run: |
mkdir ~/mne_data
- name: Cache datasets and docs
id: cached-dataset-docs
uses: actions/cache@v3
- name: Restore cached docs build
id: cache-docs
uses: actions/cache/restore@v3
with:
key: doc-${{ github.head_ref }}-${{ hashFiles('moabb/datasets/**') }}
path: |
~/mne_data
docs/build
key: docs-build-${{ github.run_id }}-${{ github.run_attempt }}
path: docs/build

- name: Checkout moabb.github.io
uses: actions/checkout@v4
- name: Check cache hit
if: steps.cache-docs.outputs.cache-hit != 'true'
run: exit 1

- name: Deploy Neurotechx Subpage
uses: peaceiris/actions-gh-pages@v4
with:
repository: "NeuroTechX/moabb.github.io"
path: moabb-ghio
token: ${{ secrets.MOABB_GHIO }}
github_token: ${{ secrets.GITHUB_TOKEN }}
deploy_key: ${{ secrets.ACTIONS_DEPLOY_KEY }}
external_repository: NeuroTechX/moabb.github.io
destination_dir: docs/
publish_branch: master
publish_dir: ./docs/build/html
cname: moabb.neurotechx.com/

deploy_gh_pages:
if: ${{ github.ref == 'refs/heads/develop' }}
@@ -102,47 +122,28 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
os: [ ubuntu-latest ]

steps:
- uses: actions/checkout@v4

- name: Create local data folder
run: |
mkdir ~/mne_data
- name: Cache datasets and docs
id: cached-dataset-docs
uses: actions/cache@v3
- name: Restore cached docs build
id: cache-docs
uses: actions/cache/restore@v3
with:
key: doc-${{ github.head_ref }}-${{ hashFiles('moabb/datasets/**') }}
path: |
~/mne_data
docs/build
key: docs-build-${{ github.run_id }}-${{ github.run_attempt }}
path: docs/build

- name: Checkout gh pages
uses: actions/checkout@v4
with:
ref: gh-pages
path: moabb-ghpages
- name: Check cache hit
if: steps.cache-docs.outputs.cache-hit != 'true'
run: exit 1

- name: Deploy Neurotechx Subpage
uses: peaceiris/actions-gh-pages@v3
- name: Deploy gh-pages
uses: peaceiris/actions-gh-pages@v4
with:
deploy_key: ${{ secrets.ACTIONS_DEPLOY_KEY }}
external_repository: NeuroTechX/moabb.github.io
github_token: ${{ secrets.GITHUB_TOKEN }}
deploy_key: ${{ secrets.MOABB_DEPLOY_KEY_NEW }}
destination_dir: docs/
publish_branch: master
publish_branch: gh-pages
publish_dir: ./docs/build/html
cname: moabb.neurotechx.com/

- name: Deploy on gh-pages
run: |
git config --global user.email "ci@neurotechx.com"
git config --global user.name "Github Actions"
cd ~/work/moabb/moabb/moabb-ghpages
rm -Rf docs
cp -a ~/work/moabb/moabb/docs/build/html ./docs
git add -A
git commit -m "GH Actions update of GH pages ($GITHUB_RUN_ID - $GITHUB_RUN_NUMBER)"
git push origin gh-pages
cname: neurotechx.github.io/moabb/
36 changes: 20 additions & 16 deletions .github/workflows/test-braindecode.yml
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
name: Test-braindecode

concurrency:
group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
cancel-in-progress: true


on:
push:
branches: [develop]
branches: [ develop ]
pull_request:
branches: [develop]
branches: [ develop ]

jobs:
test:
@@ -18,8 +16,8 @@ jobs:
strategy:
fail-fast: true
matrix:
os: [ubuntu-latest]
python-version: ["3.8"]
os: [ ubuntu-latest ]
python-version: [ "3.8" ]
defaults:
run:
shell: bash
@@ -33,8 +31,21 @@ jobs:
repository: braindecode/braindecode
path: braindecode

- name: Create local data folder
if: runner.os != 'Windows'
run: |
mkdir ~/mne_data
- name: Create/Restore MNE Data Cache
if: runner.os != 'Windows'
id: cache-mne_data
uses: actions/cache@v3
with:
path: ~/mne_data
key: ${{ runner.os }}-mne_data

- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -44,22 +55,15 @@ jobs:
virtualenvs-create: true
virtualenvs-in-project: true

- name: Create/Restore MNE Data Cache
id: cache-mne_data
uses: actions/cache@v3
with:
path: ~/mne_data
key: ${{ runner.os }}-mne

- name: Load cached venv
if: runner.os != 'Windows'
id: cached-poetry-dependencies
uses: actions/cache@v3
with:
path: .venv
key:
testvenv-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/poetry.lock') }}
testvenv-braindecode-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/pyproject.toml') }}

- name: Install dependencies
if: |
33 changes: 22 additions & 11 deletions .github/workflows/test-devel.yml
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
name: Test-devel

concurrency:
group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
cancel-in-progress: true


on:
push:
branches: [develop]
branches: [ develop ]
pull_request:
branches: [develop]
branches: [ develop ]

jobs:
test:
@@ -18,16 +16,29 @@ jobs:
strategy:
fail-fast: true
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
python-version: ["3.9", "3.10"]
os: [ ubuntu-latest, windows-latest, macOS-latest ]
python-version: [ "3.9", "3.10" ]
defaults:
run:
shell: bash
steps:
- uses: actions/checkout@v4

- name: Create local data folder
if: runner.os != 'Windows'
run: |
mkdir ~/mne_data
- name: Create/Restore MNE Data Cache
if: runner.os != 'Windows'
id: cache-mne_data
uses: actions/cache@v3
with:
path: ~/mne_data
key: ${{ runner.os }}-mne_data

- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -45,21 +56,21 @@ jobs:
path: .venv
key:
testvenv-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/poetry.lock') }}
hashFiles('**/pyproject.toml') }}

- name: Install dependencies
if: |
(runner.os != 'Windows') &&
(steps.cached-poetry-dependencies.outputs.cache-hit != 'true')
run: poetry install --no-interaction --no-root --extras deeplearning
run: poetry install --no-interaction --no-root --extras deeplearning --extras optuna

- name: Install library (Linux/OSX)
if: ${{ runner.os != 'Windows' }}
run: poetry install --no-interaction --extras deeplearning
run: poetry install --no-interaction --extras deeplearning --extras optuna

- name: Install library (Windows)
if: ${{ runner.os == 'Windows' }}
run: poetry install --no-interaction
run: poetry install --no-interaction --extras optuna

- name: Run tests
run: |
35 changes: 23 additions & 12 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
name: Test

concurrency:
group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
cancel-in-progress: true


on:
push:
branches: [master]
branches: [ master ]
pull_request:
branches: [master]
branches: [ master ]

jobs:
test:
@@ -18,16 +16,29 @@ jobs:
strategy:
fail-fast: true
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
python-version: ["3.9"]
os: [ ubuntu-latest, windows-latest, macOS-latest ]
python-version: [ "3.9", "3.10" ]
defaults:
run:
shell: bash
steps:
- uses: actions/checkout@v4

- name: Create local data folder
if: runner.os != 'Windows'
run: |
mkdir ~/mne_data
- name: Create/Restore MNE Data Cache
if: runner.os != 'Windows'
id: cache-mne_data
uses: actions/cache@v3
with:
path: ~/mne_data
key: ${{ runner.os }}-mne_data

- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -44,22 +55,22 @@ jobs:
with:
path: .venv
key:
venv-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/poetry.lock') }}
testvenv-${{ matrix.os }}-py${{matrix.python-version}}-${{
hashFiles('**/pyproject.toml') }}

- name: Install dependencies
if: |
(runner.os != 'Windows') &&
(steps.cached-poetry-dependencies.outputs.cache-hit != 'true')
run: poetry install --no-interaction --no-root --extras deeplearning
run: poetry install --no-interaction --no-root --extras deeplearning --extras optuna

- name: Install library (Linux/OSX)
if: ${{ runner.os != 'Windows' }}
run: poetry install --no-interaction --extras deeplearning
run: poetry install --no-interaction --extras deeplearning --extras optuna

- name: Install library (Windows)
if: ${{ runner.os == 'Windows' }}
run: poetry install --no-interaction
run: poetry install --no-interaction --extras optuna

- name: Run tests
run: |
16 changes: 8 additions & 8 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -15,7 +15,7 @@ exclude: ".*svg"

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: v4.6.0
hooks:
- id: check-yaml
- id: check-json
@@ -35,14 +35,14 @@ repos:


- repo: https://github.com/psf/black
rev: 24.3.0
rev: 24.4.2
hooks:
- id: black
language_version: python3.8
language_version: python3
args: [ --line-length=90, --target-version=py38 ]

- repo: https://github.com/asottile/blacken-docs
rev: 1.16.0
rev: 1.18.0
hooks:
- id: blacken-docs
additional_dependencies: [black==23.3.0]
@@ -54,7 +54,7 @@ repos:
- id: isort

- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
rev: 7.1.0
hooks:
- id: flake8
additional_dependencies: [
@@ -69,17 +69,17 @@ repos:
exclude: ^docs/ | ^setup\.py$ |

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.5
rev: v0.5.0
hooks:
- id: ruff
args: [ --fix, --exit-non-zero-on-fix, --ignore, E501 ]

- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
rev: v2.3.0
hooks:
- id: codespell
args:
- --ignore-words-list=additionals,alle,alot,bund,currenty,datas,farenheit,falsy,fo,haa,hass,iif,incomfort,ines,ist,nam,nd,pres,pullrequests,resset,rime,ser,serie,te,technik,ue,unsecure,withing,zar,crate
- --ignore-words-list=assertIn,additionals,alle,alot,bund,currenty,datas,farenheit,falsy,fo,haa,hass,iif,incomfort,ines,ist,nam,nd,pres,pullrequests,resset,rime,ser,serie,te,technik,ue,unsecure,withing,zar,crate
- --skip="./.*,*.csv,*.json,*.ambr"
- --quiet-level=2
exclude_types: [ csv, json, svg ]
8 changes: 4 additions & 4 deletions CITATION.cff
Original file line number Diff line number Diff line change
@@ -24,8 +24,8 @@ authors:
- family-names: "Bjareholt"
given-names: "Erik"
orcid: "https://orcid.org/0000-0003-1350-9677"
- family-names: "Quentin"
given-names: "Barthelemy"
- family-names: "Barthelemy"
given-names: "Quentin"
orcid: "https://orcid.org/0000-0002-7059-6028"
- family-names: "Schirrmeister"
given-names: "Robin Tibor"
@@ -70,7 +70,7 @@ authors:
given-names: "Sylvain"
orcid: "https://orcid.org/0000-0003-3027-8241"
title: "Mother of all BCI Benchmarks"
version: 1.1.0
version: 1.1.1
doi: 10.5281/zenodo.10034223
date-released: 2024-05-30
date-released: 2024-09-16
url: "https://github.com/NeuroTechX/moabb"
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -291,18 +291,18 @@ If you use MOABB in your experiments, please cite this library when
publishing a paper to increase the visibility of open science initiatives:

```
Aristimunha, B., Carrara, I., Guetschel, P., Sedlar, S., Rodrigues, P., Sosulski, J., Narayanan, D., Bjareholt, E., Quentin, B., Schirrmeister, R. T.,Kalunga, E., Darmet, L., Gregoire, C., Abdul Hussain, A., Gatti, R., Goncharenko, V., Thielen, J., Moreau, T., Roy, Y., Jayaram, V., Barachant,A., & Chevallier, S.
Aristimunha, B., Carrara, I., Guetschel, P., Sedlar, S., Rodrigues, P., Sosulski, J., Narayanan, D., Bjareholt, E., Barthelemy, Q., Reinmar, K., Schirrmeister, R. T.,Kalunga, E., Darmet, L., Gregoire, C., Abdul Hussain, A., Gatti, R., Goncharenko, V., Thielen, J., Moreau, T., Roy, Y., Jayaram, V., Barachant,A., & Chevallier, S.
Mother of all BCI Benchmarks (MOABB), 2023. DOI: 10.5281/zenodo.10034223.
```
and here is the Bibtex version:
```bibtex
@software{Aristimunha_Mother_of_all_2023,
author = {Aristimunha, Bruno and Carrara, Igor and Guetschel, Pierre and Sedlar, Sara and Rodrigues, Pedro and Sosulski, Jan and Narayanan, Divyesh and Bjareholt, Erik and Quentin, Barthelemy and Schirrmeister, Robin Tibor and Kalunga, Emmanuel and Darmet, Ludovic and Gregoire, Cattan and Abdul Hussain, Ali and Gatti, Ramiro and Goncharenko, Vladislav and Thielen, Jordy and Moreau, Thomas and Roy, Yannick and Jayaram, Vinay and Barachant, Alexandre and Chevallier, Sylvain},
@software{Aristimunha_Mother_of_all,
author = {Aristimunha, Bruno and Carrara, Igor and Guetschel, Pierre and Sedlar, Sara and Rodrigues, Pedro and Sosulski, Jan and Narayanan, Divyesh and Bjareholt, Erik and Barthelemy, Quentin and Kobler, Reinmar and Schirrmeister, Robin Tibor and Kalunga, Emmanuel and Darmet, Ludovic and Gregoire, Cattan and Abdul Hussain, Ali and Gatti, Ramiro and Goncharenko, Vladislav and Thielen, Jordy and Moreau, Thomas and Roy, Yannick and Jayaram, Vinay and Barachant, Alexandre and Chevallier, Sylvain},
doi = {10.5281/zenodo.10034223},
title = {{Mother of all BCI Benchmarks}},
url = {https://github.com/NeuroTechX/moabb},
version = {1.1.0},
year = {2023}
year = {2024}
}
```
If you want to cite the scientific contributions of MOABB, you could use the following paper:
1 change: 1 addition & 0 deletions docs/Makefile
Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@ help:
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@python prepare_summary_tables.py ../moabb/datasets $(BUILDDIR)
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

apidoc:
34 changes: 34 additions & 0 deletions docs/prepare_summary_tables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import glob
from argparse import ArgumentParser
from pathlib import Path

import pandas as pd


def prepare_table(df: pd.DataFrame):
no_pwc = df["PapersWithCode leaderboard"].isna()
df.loc[no_pwc, "PapersWithCode leaderboard"] = "No"
df.loc[~no_pwc, "PapersWithCode leaderboard"] = df.loc[
~no_pwc, "PapersWithCode leaderboard"
].apply(lambda x: f"`Yes <{x}>`_")
df["Dataset"] = df["Dataset"].apply(lambda x: f":class:`{x}`")


def main(source_dir: str, target_dir: str):
target_dir = Path(target_dir)
target_dir.mkdir(parents=True, exist_ok=True)
files = glob.glob(str(Path(source_dir) / "*.csv"))
for f in files:
target_file = target_dir / Path(f).name
print(f"Processing {f} -> {target_file}")
df = pd.read_csv(f, index_col=False, header=0, skipinitialspace=True)
prepare_table(df)
df.to_csv(target_file, index=False)


if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("source_dir", type=str)
parser.add_argument("target_dir", type=str)
args = parser.parse_args()
main(args.source_dir, args.target_dir)
3 changes: 0 additions & 3 deletions docs/source/README.md
Original file line number Diff line number Diff line change
@@ -82,7 +82,6 @@ The project is currently maintained by:
<th>Bruno Aristimunha</th>
<th>Igor Carrara</th>
<th>Pierre Guetschel</th>
<th>Sara Sedlar</th>
</tr>
</thead>
<tbody>
@@ -91,7 +90,6 @@ The project is currently maintained by:
<td style="padding: 0 7px;"><img src="https://avatars.githubusercontent.com/u/42702466?s=150&amp;v=4" alt="Bruno Aristimunha"></td>
<td style="padding: 0 7px;"><img src="https://avatars.githubusercontent.com/u/94047258?s=150&amp;v=4" alt="Igor Carrara"></td>
<td style="padding: 0 7px;"><img src="https://avatars.githubusercontent.com/u/25532709?s=150&amp;v=4" alt="Pierre Guetschel"></td>
<td style="padding: 0 7px;"><img src="https://avatars.githubusercontent.com/u/5344945?s=150&amp;v=4" alt="Sara Sedlar"></td>
</tr>
</tbody>
</table>
@@ -230,7 +228,6 @@ the link on the gitter channel. We are also on NeuroTechX Slack channel
[link_bruno]: https://www.linkedin.com/in/bruaristimunha/
[link_igor]: https://www.linkedin.com/in/carraraig/
[link_pierre]: https://www.linkedin.com/in/pierreguetschel/
[link_sara]: https://www.linkedin.com/in/sara-sedlar-28709893/
[link_neurotechx_signup]: https://neurotechx.com/
[link_gitter]: https://app.gitter.im/#/room/#moabb_dev_community:gitter.im
[link_moabb_docs]: https://neurotechx.github.io/moabb/
70 changes: 11 additions & 59 deletions docs/source/dataset_summary.rst
Original file line number Diff line number Diff line change
@@ -22,66 +22,28 @@ Motor Imagery
======================

.. csv-table::
:header: Dataset, #Subj, #Chan, #Classes, #Trials, Trial length, Freq, #Session, #Runs, Total_trials
:file: ../build/summary_imagery.csv
:header-rows: 1
:class: sortable

:class:`AlexMI`,8,16,3,20,3s,512Hz,1,1,480
:class:`BNCI2014_001`,9,22,4,144,4s,250Hz,2,6,62208
:class:`BNCI2014_002`,14,15,2,80,5s,512Hz,1,8,17920
:class:`BNCI2014_004`,9,3,2,360,4.5s,250Hz,5,1,32400
:class:`BNCI2015_001`,12,13,2,200,5s,512Hz,3,1,14400
:class:`BNCI2015_004`,9,30,5,80,7s,256Hz,2,1,7200
:class:`Cho2017`,52,64,2,100,3s,512Hz,1,1,9800
:class:`Lee2019_MI`,54,62,2,100,4s,1000Hz,2,1,11000
:class:`GrosseWentrup2009`,10,128,2,150,7s,500Hz,1,1,3000
:class:`Schirrmeister2017`,14,128,4,120,4s,500Hz,1,2,13440
:class:`Ofner2017`,15,61,7,60,3s,512Hz,1,10,63000
:class:`PhysionetMI`,109,64,4,23,3s,160Hz,1,1,69760
:class:`Shin2017A`,29,30,2,30,10s,200Hz,3,1,5220
:class:`Shin2017B`,29,30,2,30,10s,200Hz,3,1,5220
:class:`Weibo2014`,10,60,7,80,4s,200Hz,1,1,5600
:class:`Zhou2016`,4,14,3,160,5s,250Hz,3,2,11496
:class:`Stieger2021`,62,64,4,450,3s,1000Hz,7 or 11,1,250000

P300/ERP
======================

.. csv-table::
:header: Dataset, #Subj, #Chan, #Trials / class, Trials length, Sampling rate, #Sessions
:file: ../build/summary_p300.csv
:header-rows: 1
:class: sortable

:class:`BNCI2014_008`, 8, 8, 3500 NT / 700 T, 1s, 256Hz, 1
:class:`BNCI2014_009`, 10, 16, 1440 NT / 288 T, 0.8s, 256Hz, 3
:class:`BNCI2015_003`, 10, 8, 1500 NT / 300 T, 0.8s, 256Hz, 1
:class:`BI2012`, 25, 16, 640 NT / 128 T, 1s, 128Hz, 2
:class:`BI2013a`, 24, 16, 3200 NT / 640 T, 1s, 512Hz, 8 for subjects 1-7 else 1
:class:`BI2014a`, 64, 16, 990 NT / 198 T, 1s, 512Hz, up to 3
:class:`BI2014b`, 38, 32, 200 NT / 40 T, 1s, 512Hz, 3
:class:`BI2015a`, 43, 32, 4131 NT / 825 T, 1s, 512Hz, 3
:class:`BI2015b`, 44, 32, 2160 NT / 480 T, 1s, 512Hz, 1
:class:`Cattan2019_VR`, 21, 16, 600 NT / 120 T, 1s, 512Hz, 2
:class:`Huebner2017`, 13, 31, 364 NT / 112 T, 0.9s, 1000Hz, 3
:class:`Huebner2018`, 12, 31, 364 NT / 112 T, 0.9s, 1000Hz, 3
:class:`Sosulski2019`, 13, 31, 7500 NT / 1500 T, 1.2s, 1000Hz, 1
:class:`EPFLP300`, 8, 32, 2753 NT / 551 T, 1s, 2048Hz, 4
:class:`Lee2019_ERP`, 54, 62, 6900 NT / 1380 T, 1s, 1000Hz, 2


SSVEP
======================


.. csv-table::
:header: Dataset, #Subj, #Chan, #Classes, #Trials / class, Trials length, Sampling rate, #Sessions
:file: ../build/summary_ssvep.csv
:header-rows: 1
:class: sortable

:class:`Lee2019_SSVEP`,54,62,4,50,4s,1000Hz,2
:class:`Kalunga2016`,12,8,4,16,2s,256Hz,1
:class:`MAMEM1`,10,256,5,12-15,3s,250Hz,1
:class:`MAMEM2`,10,256,5,20-30,3s,250Hz,1
:class:`MAMEM3`,10,14,4,20-30,3s,128Hz,1
:class:`Nakanishi2015`,9,8,12,15,4.15s,256Hz,1
:class:`Wang2016`,34,62,40,6,5s,250Hz,1

c-VEP
======================
@@ -97,17 +59,10 @@ potentials (c-VEP): A literature review. Journal of Neural Engineering, 18(6), 0
DOI: https://doi.org/10.1088/1741-2552/ac38cf

.. csv-table::
:header: Dataset, #Subj, #Sessions, Sampling rate, #Chan, Trials length, #Trial classes, #Trials / class, #Epochs classes, #Epochs / class, Codes, Presentation rate
:file: ../build/summary_cvep.csv
:header-rows: 1
:class: sortable

:class:`Thielen2015`,12,1,2048Hz,64,4.2s,36,3,2,27216 NT / 27216 T,Gold codes,120Hz
:class:`Thielen2021`,30,1,512Hz,8,31.5s,20,5,2,18900 NT / 18900 T,Gold codes,60Hz
:class:`CastillosCVEP100`, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,3525 NT / 3495 T,m-sequence,60Hz
:class:`CastillosCVEP40`, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,3525 NT / 3495 T,m-sequence,60Hz
:class:`CastillosBurstVEP40`, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,5820 NT / 1200 T,Burst-CVEP,60Hz
:class:`CastillosBurstVEP100`,12,1,500Hz,32,2.2s,4,15/15/15/15,2,5820 NT / 1200 T,Burst-CVEP,60Hz


Resting States
======================

@@ -116,12 +71,9 @@ For example, recoding the EEG of a subject while s/he is having the eye closed o
is a resting state experiment.

.. csv-table::
:header: Dataset, #Subj, #Chan, #Classes, #Blocks / class, Trials length, Sampling rate, #Sessions
:class: sortable

:class:`Cattan2019_PHMD`,12,16,2,10,60s,512Hz,1
:class:`Hinss2021`,15,62,4,1,2s,250Hz,1
:class:`Rodrigues2017`,20,16,2,5,10s,512Hz,1
:file: ../build/summary_rstate.csv
:header-rows: 1
:class: sortable

Compound Datasets
======================
2 changes: 1 addition & 1 deletion docs/source/datasets.rst
Original file line number Diff line number Diff line change
@@ -31,7 +31,7 @@ Motor Imagery Datasets
Weibo2014
Zhou2016
Stieger2021

Liu2024

------------
ERP Datasets
31 changes: 27 additions & 4 deletions docs/source/whats_new.rst
Original file line number Diff line number Diff line change
@@ -17,19 +17,40 @@ Develop branch

Enhancements
~~~~~~~~~~~~
- None

Bugs
~~~~
- None

- Fix Stieger2021 dataset bugs (:gh:`651` by `Martin Wimpff`_)
- Unpinning major version Scikit-learn and numpy (:gh:`652` by `Bruno Aristimunha`_)

API changes
~~~~~~~~~~~
- None

Version - 1.1.0 (Stable - PyPi)
Version - 1.1.1 (Stable - PyPi)
---------------------------------

Enhancements
~~~~~~~~~~~~
- Add possibility to use OptunaGridSearch (:gh:`630` by `Igor Carrara`_)
- Add scripts to upload results on PapersWithCode (:gh:`561` by `Pierre Guetschel`_)
- Centralize dataset summary tables in CSV files (:gh:`635` by `Pierre Guetschel`_)
- Add new dataset :class:`moabb.datasets.Liu2024` dataset (:gh:`619` by `Taha Habib`_)


Bugs
~~~~
- Fix caching in the workflows (:gh:`632` by `Pierre Guetschel`_)

API changes
~~~~~~~~~~~
- Include optuna as soft-dependency in the benchmark function and in the base of evaluation (:gh:`630` by `Igor Carrara`_)



Version - 1.1.0
----------------


Enhancements
~~~~~~~~~~~~
@@ -444,6 +465,7 @@ Bugs
API changes
~~~~~~~~~~~
- None
.. _Martin Wimpff: https://github.com/martinwimpff
.. _Reinmar Kobler: https://github.com/rkobler
.. _Gabriel Schwartz: https://github.com/Kaos9001
.. _Sara Sedlar: https://github.com/Sara04
@@ -474,3 +496,4 @@ API changes
.. _Brian Irvine: https://github.com/brianjohannes
.. _Bruna Lopes: https://github.com/brunaafl
.. _Yash Chauhan: https://github.com/jiggychauhi
.. _Taha Habib: https://github.com/tahatt13
6 changes: 3 additions & 3 deletions examples/plot_Hinss2021_classification.py
Original file line number Diff line number Diff line change
@@ -43,6 +43,7 @@

set_log_level("info")


##############################################################################
# Create util transformer
# ----------------------
@@ -103,12 +104,11 @@ def transform(self, X):

# To reduce the computation time in the example, we will only use the
# first two subjects.
start_subject = 1
stop_subject = 2
n__subjects = 2
title = "Datasets: "
for dataset in datasets:
title = title + " " + dataset.code
dataset.subject_list = dataset.subject_list[start_subject:stop_subject]
dataset.subject_list = dataset.subject_list[:n__subjects]

##############################################################################
# Create Pipelines
2 changes: 1 addition & 1 deletion moabb/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# flake8: noqa
__version__ = "1.1.0"
__version__ = "1.1.1"

from .benchmark import benchmark
from .utils import make_process_pipelines, set_download_dir, set_log_level, setup_seed
8 changes: 8 additions & 0 deletions moabb/benchmark.py
Original file line number Diff line number Diff line change
@@ -45,6 +45,7 @@ def benchmark( # noqa: C901
exclude_datasets=None,
n_splits=None,
cache_config=None,
optuna=False,
):
"""Run benchmarks for selected pipelines and datasets.
@@ -102,6 +103,7 @@ def benchmark( # noqa: C901
and exclude_datasets are specified, raise an error.
exclude_datasets: list of str or Dataset object
Datasets to exclude from the benchmark run
optuna: Enable Optuna for the hyperparameter search
Returns
-------
@@ -110,7 +112,11 @@ def benchmark( # noqa: C901
Notes
-----
.. versionadded:: 1.1.1
Includes the possibility to use Optuna for hyperparameter search.
.. versionadded:: 0.5.0
Create the function to run the benchmark
"""
# set logs
if evaluations is None:
@@ -182,6 +188,7 @@ def benchmark( # noqa: C901
return_epochs=True,
n_splits=n_splits,
cache_config=cache_config,
optuna=optuna,
)
paradigm_results = context.process(
pipelines=ppl_with_epochs, param_grid=param_grid
@@ -202,6 +209,7 @@ def benchmark( # noqa: C901
overwrite=overwrite,
n_splits=n_splits,
cache_config=cache_config,
optuna=optuna,
)
paradigm_results = context.process(
pipelines=ppl_with_array, param_grid=param_grid
27 changes: 0 additions & 27 deletions moabb/datasets/Lee2019.py
Original file line number Diff line number Diff line change
@@ -220,15 +220,6 @@ def data_path(
class Lee2019_MI(Lee2019):
"""BMI/OpenBMI dataset for MI.
.. admonition:: Dataset summary
========== ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
========== ======= ======= ========== ================= ============ =============== ===========
Lee2019_MI 54 62 2 100 4s 1000Hz 2
========== ======= ======= ========== ================= ============ =============== ===========
Dataset from Lee et al 2019 [1]_.
**Dataset Description**
@@ -290,15 +281,6 @@ class Lee2019_MI(Lee2019):
class Lee2019_ERP(Lee2019):
"""BMI/OpenBMI dataset for P300.
.. admonition:: Dataset summary
=========== ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
=========== ======= ======= ================= =============== =============== ===========
Lee2019_ERP 54 62 6900 NT / 1380 T 1s 1000Hz 2
=========== ======= ======= ================= =============== =============== ===========
Dataset from Lee et al 2019 [1]_.
**Dataset Description**
@@ -380,15 +362,6 @@ class Lee2019_ERP(Lee2019):
class Lee2019_SSVEP(Lee2019):
"""BMI/OpenBMI dataset for SSVEP.
.. admonition:: Dataset summary
============= ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
============= ======= ======= ========== ================= =============== =============== ===========
Lee2019_SSVEP 54 62 4 50 4s 1000Hz 2
============= ======= ======= ========== ================= =============== =============== ===========
Dataset from Lee et al 2019 [1]_.
**Dataset Description**
9 changes: 0 additions & 9 deletions moabb/datasets/Weibo2014.py
Original file line number Diff line number Diff line change
@@ -64,15 +64,6 @@ def get_subjects(sub_inds, sub_names, ind):
class Weibo2014(BaseDataset):
"""Motor Imagery dataset from Weibo et al 2014.
.. admonition:: Dataset summary
========= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
========= ======= ======= ========== ================= ============ =============== ===========
Weibo2014 10 60 7 80 4s 200Hz 1
========= ======= ======= ========== ================= ============ =============== ===========
Dataset from the article *Evaluation of EEG oscillatory patterns and
cognitive process during simple and compound limb motor imagery* [1]_.
9 changes: 0 additions & 9 deletions moabb/datasets/Zhou2016.py
Original file line number Diff line number Diff line change
@@ -50,15 +50,6 @@ def local_data_path(base_path, subject):
class Zhou2016(BaseDataset):
"""Motor Imagery dataset from Zhou et al 2016.
.. admonition:: Dataset summary
======== ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
======== ======= ======= ========== ================= ============ =============== ===========
Zhou2016 4 14 3 160 5s 250Hz 3
======== ======= ======= ========== ================= ============ =============== ===========
Dataset from the article *A Fully Automated Trial Selection Method for
Optimization of Motor Imagery Based Brain-Computer Interface* [1]_.
This dataset contains data recorded on 4 subjects performing 3 type of
1 change: 1 addition & 0 deletions moabb/datasets/__init__.py
Original file line number Diff line number Diff line change
@@ -61,6 +61,7 @@
from .hinss2021 import Hinss2021
from .huebner_llp import Huebner2017, Huebner2018
from .Lee2019 import Lee2019_ERP, Lee2019_MI, Lee2019_SSVEP
from .liu2024 import Liu2024
from .mpi_mi import MunichMI # noqa: F401
from .mpi_mi import GrosseWentrup2009
from .neiry import DemonsP300
9 changes: 0 additions & 9 deletions moabb/datasets/alex_mi.py
Original file line number Diff line number Diff line change
@@ -12,15 +12,6 @@
class AlexMI(BaseDataset):
"""Alex Motor Imagery dataset.
.. admonition:: Dataset summary
====== ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
====== ======= ======= ========== ================= ============ =============== ===========
AlexMI 8 16 3 20 3s 512Hz 1
====== ======= ======= ========== ================= ============ =============== ===========
Motor imagery dataset from the PhD dissertation of A. Barachant [1]_.
This Dataset contains EEG recordings from 8 subjects, performing 2 task of
11 changes: 0 additions & 11 deletions moabb/datasets/alphawaves.py
Original file line number Diff line number Diff line change
@@ -17,16 +17,6 @@
class Rodrigues2017(BaseDataset):
"""Alphawaves dataset
.. admonition:: Dataset summary
=============== ======= ======= ========== =============== ============ =============== ===========
Name #Subj #Chan #Classes #Blocks/class Trials len Sampling rate #Sessions
=============== ======= ======= ========== =============== ============ =============== ===========
Rodrigues2017 20 16 2 5 10s 512Hz 1
=============== ======= ======= ========== =============== ============ =============== ===========
Dataset containing EEG recordings of subjects in a simple
resting-state eyes open/closed experimental protocol. Data were recorded
during a pilot experiment taking place in the GIPSA-lab, Grenoble,
@@ -139,7 +129,6 @@ def _get_single_subject_data(self, subject):
def data_path(
self, subject, path=None, force_update=False, update_path=None, verbose=None
):

if subject not in self.subject_list:
raise (ValueError("Invalid subject number"))

92 changes: 91 additions & 1 deletion moabb/datasets/base.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
"""Base class for a dataset."""

from __future__ import annotations

import abc
import logging
import re
@@ -9,6 +11,7 @@
from pathlib import Path
from typing import Dict, Union

import pandas as pd
from sklearn.pipeline import Pipeline

from moabb.datasets.bids_interface import StepType, _interface_map
@@ -18,6 +21,36 @@
log = logging.getLogger(__name__)


def get_summary_table(paradigm: str, dir_name: str | None = None):
    """Load the dataset-summary table for one paradigm.

    Parameters
    ----------
    paradigm : str
        Paradigm identifier; selects the file ``summary_<paradigm>.csv``.
    dir_name : str | None
        Directory containing the CSV file. When ``None``, the directory of
        this module is used.

    Returns
    -------
    pandas.DataFrame
        Table indexed by the ``Dataset`` column, with leading whitespace in
        cells stripped and the ``PapersWithCode leaderboard`` column kept as
        strings.
    """
    base_dir = Path(__file__).parent if dir_name is None else Path(dir_name)
    csv_path = base_dir / f"summary_{paradigm}.csv"
    return pd.read_csv(
        csv_path,
        header=0,
        index_col="Dataset",
        skipinitialspace=True,
        # Keep leaderboard entries as text even when a cell looks numeric.
        dtype={"PapersWithCode leaderboard": str},
    )


# Per-paradigm summary tables, loaded from CSV files shipped next to this
# module (moabb/datasets/summary_*.csv).
_summary_table_imagery = get_summary_table("imagery")
_summary_table_p300 = get_summary_table("p300")
_summary_table_ssvep = get_summary_table("ssvep")
_summary_table_cvep = get_summary_table("cvep")
_summary_table_rstate = get_summary_table("rstate")
# Single lookup table indexed by dataset class name, concatenated across all
# paradigms; consumed when building dataset-class docstrings.
_summary_table = pd.concat(
    [
        _summary_table_imagery,
        _summary_table_p300,
        _summary_table_ssvep,
        _summary_table_cvep,
        _summary_table_rstate,
    ],
)


@dataclass
class CacheConfig:
"""
@@ -178,7 +211,64 @@ def check_run_names(data):
)


class BaseDataset(metaclass=abc.ABCMeta):
def format_row(row: pd.Series):
    """Render one summary-table row as an RST "Dataset summary" admonition.

    NaN entries are dropped. When the row has a "PapersWithCode leaderboard"
    entry, it is removed from the table and emitted as a separate bold line
    placed above the admonition. Whole-valued numbers are printed without a
    decimal part (e.g. ``4.0`` -> ``"4"``).
    """
    pwc_key = "PapersWithCode leaderboard"
    indent = " " * 8
    row = row[~row.isna()]
    pwc_link = row.get(pwc_key, None)
    if pwc_link is not None:
        row = row.drop(pwc_key)

    def _display(value):
        # Collapse exact whole numbers to ints; anything non-numeric (or a
        # genuine float) is rendered as-is.
        try:
            whole = int(value)
        except ValueError:
            return str(value)
        return str(whole) if whole == value else str(value)

    headers = [str(col) for col in row.index]
    cells = [_display(val) for val in row.values]
    widths = [max(len(h), len(c)) for h, c in zip(headers, cells)]
    rule = " ".join("=" * w for w in widths)
    header_line = " ".join(h.rjust(w) for h, w in zip(headers, widths))
    value_line = " ".join(c.rjust(w) for c, w in zip(cells, widths))
    table = "\n".join(
        [
            " .. admonition:: Dataset summary",
            "",
            indent + rule,
            indent + header_line,
            indent + rule,
            indent + value_line,
            indent + rule,
        ]
    )
    if pwc_link is not None:
        table = f" **{pwc_key}:** {pwc_link}\n\n" + table
    return table


class MetaclassDataset(abc.ABCMeta):
    """Metaclass that injects a dataset-summary table into class docstrings.

    At class-creation time, the class name is looked up in the module-level
    ``_summary_table``; when a row exists, the rendered table (see
    ``format_row``) is inserted after the docstring's first paragraph (or
    appended when the docstring has a single paragraph). Classes without a
    summary row are left unchanged, with a debug log entry.
    """

    def __new__(cls, name, bases, attrs):
        docstring = attrs.get("__doc__", "")
        try:
            summary = format_row(_summary_table.loc[name])
        except KeyError:
            # No CSV row for this dataset; keep the docstring as-is.
            log.debug(
                f"No description found for dataset {name}. "
                f"Complete the appropriate moabb/datasets/summary_*.csv file"
            )
        else:
            paragraphs = docstring.split("\n\n")
            if len(paragraphs) >= 2:
                paragraphs.insert(1, summary)
            else:
                paragraphs.append(summary)
            attrs["__doc__"] = "\n\n".join(paragraphs)
        return super().__new__(cls, name, bases, attrs)


class BaseDataset(metaclass=MetaclassDataset):
"""Abstract Moabb BaseDataset.
Parameters required for all datasets
18 changes: 0 additions & 18 deletions moabb/datasets/bbci_eeg_fnirs.py
Original file line number Diff line number Diff line change
@@ -192,15 +192,6 @@ def data_path(
class Shin2017A(BaseShin2017):
"""Motor Imagey Dataset from Shin et al 2017.
.. admonition:: Dataset summary
========= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
========= ======= ======= ========== ================= ============ =============== ===========
Shin2017A 29 30 2 30 10s 200Hz 3
========= ======= ======= ========== ================= ============ =============== ===========
Dataset from [1]_.
@@ -315,15 +306,6 @@ def __init__(self, accept=False):
class Shin2017B(BaseShin2017):
"""Mental Arithmetic Dataset from Shin et al 2017.
.. admonition:: Dataset summary
========= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
========= ======= ======= ========== ================= ============ =============== ===========
Shin2017B 29 30 2 30 10s 200Hz 3
========= ======= ======= ========== ================= ============ =============== ===========
Dataset from [1]_.
.. caution::
71 changes: 0 additions & 71 deletions moabb/datasets/bnci.py
Original file line number Diff line number Diff line change
@@ -756,15 +756,6 @@ def data_path(
class BNCI2014_001(MNEBNCI):
"""BNCI 2014-001 Motor Imagery dataset.
.. admonition:: Dataset summary
============ ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============ ======= ======= ========== ================= ============ =============== ===========
BNCI2014_001 9 22 4 144 4s 250Hz 2
============ ======= ======= ========== ================= ============ =============== ===========
Dataset IIa from BCI Competition 4 [1]_.
**Dataset Description**
@@ -820,15 +811,6 @@ def __init__(self):
class BNCI2014_002(MNEBNCI):
"""BNCI 2014-002 Motor Imagery dataset.
.. admonition:: Dataset summary
============ ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============ ======= ======= ========== ================= ============ =============== ===========
BNCI2014_002 14 15 2 80 5s 512Hz 1
============ ======= ======= ========== ================= ============ =============== ===========
Motor Imagery Dataset from [1]_.
**Dataset description**
@@ -882,15 +864,6 @@ def __init__(self):
class BNCI2014_004(MNEBNCI):
"""BNCI 2014-004 Motor Imagery dataset.
.. admonition:: Dataset summary
============ ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============ ======= ======= ========== ================= ============ =============== ===========
BNCI2014_004 9 3 2 360 4.5s 250Hz 5
============ ======= ======= ========== ================= ============ =============== ===========
Dataset B from BCI Competition 2008.
**Dataset description**
@@ -965,15 +938,6 @@ def __init__(self):
class BNCI2014_008(MNEBNCI):
"""BNCI 2014-008 P300 dataset.
.. admonition:: Dataset summary
============ ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
============ ======= ======= ================= =============== =============== ===========
BNCI2014_008 8 8 3500 NT / 700 T 1s 256Hz 1
============ ======= ======= ================= =============== =============== ===========
Dataset from [1]_.
**Dataset description**
@@ -1036,15 +1000,6 @@ def __init__(self):
class BNCI2014_009(MNEBNCI):
"""BNCI 2014-009 P300 dataset.
.. admonition:: Dataset summary
============ ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
============ ======= ======= ================= =============== =============== ===========
BNCI2014_009 10 16 1440 NT / 288 T 0.8s 256Hz 3
============ ======= ======= ================= =============== =============== ===========
Dataset from [1]_.
**Dataset description**
@@ -1098,15 +1053,6 @@ def __init__(self):
class BNCI2015_001(MNEBNCI):
"""BNCI 2015-001 Motor Imagery dataset.
.. admonition:: Dataset summary
============ ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============ ======= ======= ========== ================= ============ =============== ===========
BNCI2015_001 12 13 2 200 5s 512Hz 2
============ ======= ======= ========== ================= ============ =============== ===========
Dataset from [1]_.
**Dataset description**
@@ -1154,14 +1100,6 @@ def __init__(self):
class BNCI2015_003(MNEBNCI):
"""BNCI 2015-003 P300 dataset.
.. admonition:: Dataset summary
============ ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
============ ======= ======= ================= =============== =============== ===========
BNCI2015_003 10 8 1500 NT / 300 T 0.8s 256Hz 1
============ ======= ======= ================= =============== =============== ===========
Dataset from [1]_.
@@ -1197,15 +1135,6 @@ def __init__(self):
class BNCI2015_004(MNEBNCI):
"""BNCI 2015-004 Motor Imagery dataset.
.. admonition:: Dataset summary
============ ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============ ======= ======= ========== ================= ============ =============== ===========
BNCI2015_004 9 30 5 80 7s 256Hz 2
============ ======= ======= ========== ================= ============ =============== ===========
Dataset from [1]_.
**Dataset description**
51 changes: 0 additions & 51 deletions moabb/datasets/braininvaders.py
Original file line number Diff line number Diff line change
@@ -416,13 +416,6 @@ def _bi_data_path( # noqa: C901
class BI2012(BaseDataset):
"""P300 dataset BI2012 from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
================ ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
================ ======= ======= ================ =============== =============== ===========
BI2012 25 16 640 NT / 128 T 1s 128Hz 2
================ ======= ======= ================ =============== =============== ===========
Dataset following the setup from [1]_ carried-out at University of
Grenoble Alpes.
@@ -483,15 +476,6 @@ def data_path(
class BI2013a(BaseDataset):
"""P300 dataset BI2013a from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
======= ======= ======= ================= =============== =============== =================
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
======= ======= ======= ================= =============== =============== =================
BI2013a 24 16 3200 NT / 640 T 1s 512Hz (1-7)8 s|(8-24)1s
======= ======= ======= ================= =============== =============== =================
Dataset following the setup from [1]_ carried-out at University of
Grenoble Alpes.
@@ -586,13 +570,6 @@ def data_path(
class BI2014a(BaseDataset):
"""P300 dataset BI2014a from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
================ ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
================ ======= ======= ================ =============== =============== ===========
BI2014a 64 16 5 NT x 1 T 1s 512Hz up to 3
================ ======= ======= ================ =============== =============== ===========
This dataset contains electroencephalographic (EEG) recordings of 71 subjects
playing to a visual P300 Brain-Computer Interface (BCI) videogame named Brain Invaders.
The interface uses the oddball paradigm on a grid of 36 symbols (1 Target, 35 Non-Target)
@@ -645,13 +622,6 @@ def data_path(
class BI2014b(BaseDataset):
"""P300 dataset BI2014b from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
================ ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
================ ======= ======= ================ =============== =============== ===========
BI2014b 38 32 5 NT x 1 T 1s 512Hz 3
================ ======= ======= ================ =============== =============== ===========
This dataset contains electroencephalographic (EEG) recordings of 38 subjects playing in
pair (19 pairs) to the multi-user version of a visual P300-based Brain-Computer Interface (BCI)
named Brain Invaders. The interface uses the oddball paradigm on a grid of 36 symbols (1 Target,
@@ -705,13 +675,6 @@ def data_path(
class BI2015a(BaseDataset):
"""P300 dataset BI2015a from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
================ ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
================ ======= ======= ================ =============== =============== ===========
BI2015a 43 32 5 NT x 1 T 1s 512Hz 3
================ ======= ======= ================ =============== =============== ===========
This dataset contains electroencephalographic (EEG) recordings
of 43 subjects playing to a visual P300 Brain-Computer Interface (BCI)
videogame named Brain Invaders. The interface uses the oddball paradigm
@@ -766,13 +729,6 @@ def data_path(
class BI2015b(BaseDataset):
"""P300 dataset BI2015b from a "Brain Invaders" experiment.
.. admonition:: Dataset summary
================ ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
================ ======= ======= ================ =============== =============== ===========
BI2015b 44 32 5 NT x 1 T 1s 512Hz 1
================ ======= ======= ================ =============== =============== ===========
This dataset contains electroencephalographic (EEG) recordings
of 44 subjects playing in pair to the multi-user version of a visual
P300 Brain-Computer Interface (BCI) named Brain Invaders. The interface
@@ -830,13 +786,6 @@ def data_path(
class Cattan2019_VR(BaseDataset):
"""Dataset of an EEG-based BCI experiment in Virtual Reality using P300.
.. admonition:: Dataset summary
============== ======= ======= ================ =============== =============== ===========
Name #Subj #Chan #Trials/class Trials length Sampling Rate #Sessions
============== ======= ======= ================ =============== =============== ===========
Cattan2019_VR 21 16 600 NT / 120 T 1s 512Hz 2
============== ======= ======= ================ =============== =============== ===========
We describe the experimental procedures for a dataset that we have made publicly
available at https://doi.org/10.5281/zenodo.2605204 in mat (Mathworks, Natick, USA)
and csv formats [1]_. This dataset contains electroencephalographic recordings on 21
32 changes: 0 additions & 32 deletions moabb/datasets/castillos2023.py
Original file line number Diff line number Diff line change
@@ -293,14 +293,6 @@ class CastillosBurstVEP100(BaseCastillos2023):
Dataset [1]_ from the study on burst-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
CastillosBurstVEP100 12 1 500Hz 32 2.2s 4 15/15/15/15 2 5820NT/1200T Burst-CVEP 60Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
**Dataset description**
Participants were comfortably seated and instructed to read and sign the informed consent. EEG data were recorded
@@ -355,14 +347,6 @@ class CastillosBurstVEP40(BaseCastillos2023):
Dataset [1]_ from the study on burst-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
CastillosBurstVEP40 12 1 500Hz 32 2.2s 4 15/15/15/15 2 5820NT/1200T Burst-CVEP 60Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
**Dataset description**
Participants were comfortably seated and instructed to read and sign the informed consent. EEG data were recorded
@@ -417,15 +401,6 @@ class CastillosCVEP100(BaseCastillos2023):
Dataset [1]_ from the study on burst-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
CastillosCVEP100 12 1 500Hz 32 2.2s 4 15/15/15/15 2 3525NT/3495T m-sequence 60Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
**Dataset description**
Participants were comfortably seated and instructed to read and sign the informed consent. EEG data were recorded
@@ -480,13 +455,6 @@ class CastillosCVEP40(BaseCastillos2023):
Dataset [1]_ from the study on burst-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
CastillosCVEP40 12 1 500Hz 32 2.2s 4 15/15/15/15 2 3525NT/3495T m-sequence 60Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== =============== ========== =================
**Dataset description**
Participants were comfortably seated and instructed to read and sign the informed consent. EEG data were recorded
9 changes: 0 additions & 9 deletions moabb/datasets/epfl.py
Original file line number Diff line number Diff line change
@@ -18,15 +18,6 @@
class EPFLP300(BaseDataset):
"""P300 dataset from Hoffmann et al 2008.
.. admonition:: Dataset summary
======== ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
======== ======= ======= ================= =============== =============== ===========
EPFLP300 8 32 2753 NT / 551 T 1s 2048Hz 4
======== ======= ======= ================= =============== =============== ===========
Dataset from the paper [1]_.
**Dataset Description**
9 changes: 0 additions & 9 deletions moabb/datasets/gigadb.py
Original file line number Diff line number Diff line change
@@ -19,15 +19,6 @@
class Cho2017(BaseDataset):
"""Motor Imagery dataset from Cho et al 2017.
.. admonition:: Dataset summary
======= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
======= ======= ======= ========== ================= ============ =============== ===========
Cho2017 52 64 2 100 3s 512Hz 1
======= ======= ======= ========== ================= ============ =============== ===========
Dataset from the paper [1]_.
**Dataset Description**
8 changes: 0 additions & 8 deletions moabb/datasets/hinss2021.py
Original file line number Diff line number Diff line change
@@ -17,14 +17,6 @@
class Hinss2021(BaseDataset):
"""Neuroergonomic 2021 dataset.
.. admonition:: Dataset summary
=========== ======= ======= ========== =============== ============ =============== ===========
Name #Subj #Chan #Classes #Blocks/class Trials len Sampling rate #Sessions
=========== ======= ======= ========== =============== ============ =============== ===========
Hinss2021 15 62 4 1 2s 250Hz 2
=========== ======= ======= ========== =============== ============ =============== ===========
We describe the experimental procedures for a dataset that is publicly available
at https://zenodo.org/records/5055046.
This dataset contains electroencephalographic recordings of 15 subjects (6 female, with an
18 changes: 0 additions & 18 deletions moabb/datasets/huebner_llp.py
Original file line number Diff line number Diff line change
@@ -117,15 +117,6 @@ class Huebner2017(_BaseVisualMatrixSpellerDataset):
"""Learning from label proportions for a visual matrix speller (ERP)
dataset from Hübner et al 2017 [1]_.
.. admonition:: Dataset summary
=========== ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
=========== ======= ======= ================= =============== =============== ===========
Huebner2017 13 31 364 NT / 112 T 0.9s 1000Hz 3
=========== ======= ======= ================= =============== =============== ===========
**Dataset description**
The subjects were asked to spell the sentence: “Franzy jagt im komplett verwahrlosten Taxi quer durch Freiburg”.
@@ -184,15 +175,6 @@ class Huebner2018(_BaseVisualMatrixSpellerDataset):
"""Mixture of LLP and EM for a visual matrix speller (ERP) dataset from
Hübner et al 2018 [1]_.
.. admonition:: Dataset summary
=========== ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
=========== ======= ======= ================= =============== =============== ===========
Huebner2018 12 31 364 NT / 112 T 0.9s 1000Hz 3
=========== ======= ======= ================= =============== =============== ===========
**Dataset description**
Within a single session, a subject was asked to spell the beginning of a sentence in each of three blocks.The text
340 changes: 340 additions & 0 deletions moabb/datasets/liu2024.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,340 @@
"""Liu2024 Motor imagery dataset."""

import os
import shutil
import warnings
import zipfile as z
from pathlib import Path
from typing import Any, Dict, Tuple

import mne
import numpy as np
import pandas as pd
from mne.channels import read_custom_montage

from moabb.datasets import download as dl
from moabb.datasets.base import BaseDataset


# Figshare download link for the raw EEG data (zip archive extracted by
# Liu2024.data_path).
LIU2024_URL = "https://figshare.com/ndownloader/files/38516654"

# Figshare download links for the electrode-location and events files.
LIU2024_ELECTRODES = "https://figshare.com/ndownloader/files/38516078"
LIU2024_EVENTS = "https://figshare.com/ndownloader/files/38516084"


class Liu2024(BaseDataset):
"""
Dataset [1]_ from the study on motor imagery [2]_.
**Dataset description**
This dataset includes data from 50 acute stroke patients (the time after stroke ranges from 1 day to 30 days)
admitted to the stroke unit of Xuanwu Hospital of Capital Medical University. The patients included 39 males (78%)
and 11 females (22%), aged between 31 and 77 years, with an average age of 56.70 years (SD = 10.57).
Before the start of the experiment, the subject sat in a chair in a position as comfortable as possible with an
EEG cap placed on their head; subjects were positioned approximately 80 cm away from a computer screen in front of them.
The computer played audio instructions to the patient about the procedure. Each experiment lasted approximately 20 minutes,
including preparation time and approximately 10 minutes of signal recording. Before the start of the MI experiment,
the patients opened their eyes and closed their eyes for 1 minute each. The MI experiment was divided into 40 trials, and
each trial took 8 seconds, which consisted of three stages (instruction, MI and break). In the instruction stage, patients
were prompted to imagine grasping a spherical object with the left- or right-hand. In the MI stage, participants imagined
performing this action, a video of gripping motion is played on the computer, which leads the patient imagine grabbing the
ball. This video stays playing for 4 s. Patients only imagine one hand movement.In the break stage, participants were allowed
to relax and rest. The MI experiments alternated between the left- and right-hand, and the patients moved onto the next stage
of the experiment according to the instructions.
The EEG data were collected through a wireless multichannel EEG acquisition system (ZhenTec NT1, Xi’an ZhenTec Intelligence
Technology Co., Ltd., China). The system includes an EEG cap, an EEG acquisition amplifier, a data receiver and host computer
software. The EEG cap had electrodes placed according to the international 10-10 system, including 29 EEG recording electrodes
and 2 electrooculography (EOG) electrodes. The reference electrode located at CPz position and the grounding electrode located
at FPz position. All the EEG electrodes and grounding electrode are Ag/AgCl semi-dry EEG electrodes based on highly absorbable
porous sponges that are dampened with 3% NaCl solution. The EOG electrodes are composed by Ag/AgCl electrodes and conductive
adhesive hydrogel. The common-mode rejection ratio was 120 dB, the input impedance was 1 GΩ, the input noise was less than
0.4 μVrms, and the resolution was 24 bits. The acquisition impedance was less than or equal to 20 kΩ. The sampling frequency
was 500 Hz.
References
----------
.. [1] Liu, Haijie; Lv, Xiaodong (2022). EEG datasets of stroke patients.
figshare. Dataset. DOI: https://doi.org/10.6084/m9.figshare.21679035.v5
.. [2] Liu, Haijie, Wei, P., Wang, H. et al. An EEG motor imagery dataset
for brain computer interface in acute stroke patients. Sci Data 11, 131
(2024). DOI: https://doi.org/10.1038/s41597-023-02787-8
Notes
-----
To add the break and instruction events, set the `break_events` and
`instr_events` parameters to True while instantiating the class.
.. versionadded:: 1.1.1
"""

def __init__(self, break_events=False, instr_events=False):
    """Initialize the Liu2024 dataset.

    Parameters
    ----------
    break_events : bool
        If True, also annotate the "break" stage of each trial (event id 4).
    instr_events : bool
        If True, also annotate the "instruction" stage of each trial
        (event id 3).
    """
    self.break_events = break_events
    self.instr_events = instr_events
    events = {"left_hand": 1, "right_hand": 2}
    # Bug fix: the flags were swapped. `encoding` adds "instr" (id 3) when
    # instr_events is set and "break" (id 4) when break_events is set, so
    # the events dict passed to the base class must match.
    if instr_events:
        events["instr"] = 3
    if break_events:
        events["break"] = 4
    super().__init__(
        subjects=list(range(1, 50 + 1)),
        sessions_per_subject=1,
        events=events,
        code="Liu2024",
        interval=(2, 6),
        paradigm="imagery",
        doi="10.1038/s41597-023-02787-8",
    )

def data_path(
    self, subject, path=None, force_update=False, update_path=None, verbose=None
):
    """Return the data paths of a single subject.

    Parameters
    ----------
    subject : int
        The subject number to fetch data for.
    path : None | str
        Location of where to look for the data storing location. If None,
        the environment variable or config parameter MNE_(dataset) is used.
        If it doesn't exist, the "~/mne_data" directory is used. If the
        dataset is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_(dataset)_PATH in mne-python config
        to the given path.
        If None, the user is prompted.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose()).

    Returns
    -------
    list
        A list containing the path to the subject's data file.
    """
    if subject not in self.subject_list:
        raise ValueError("Invalid subject number")

    # Download the zip file containing the data
    path_zip = dl.data_dl(LIU2024_URL, self.code)
    path_zip = Path(path_zip)
    path_folder = path_zip.parent

    # Extract the archive once. Bug fix: use a context manager so the
    # ZipFile handle is closed deterministically (the original never
    # closed it, leaking a file descriptor).
    if not (path_folder / "edffile").is_dir():
        with z.ZipFile(path_zip, "r") as zip_ref:
            zip_ref.extractall(path_folder)

    subject_paths = []
    sub = f"sub-{subject:02d}"

    # Construct the path to the subject's data file
    subject_path = (
        path_folder / "edffile" / sub / "eeg" / f"{sub}_task-motor-imagery_eeg.edf"
    )
    subject_paths.append(str(subject_path))

    return subject_paths

def encoding(self, events_df: pd.DataFrame) -> Tuple[np.array, Dict[int, str]]:
    """Collapse the 'trial_type' and 'value' columns into one event id.

    Parameters
    ----------
    events_df : pd.DataFrame
        Events table with at least the columns 'trial_type' and 'value'.

    Returns
    -------
    event_category : pd.Series
        One encoded event id per retained row; rows whose
        ('trial_type', 'value') pair has no encoding are dropped.
    mapping : dict
        Encoded event id -> event label.

    Notes
    -----
    The 'value' column marks the trial stage: 1 = instructions, 2 = MI,
    3 = break. Per the pair mapping below, trial_type 2 during MI is
    encoded as ``left_hand`` (id 1) and trial_type 1 as ``right_hand``
    (id 2).
    NOTE(review): the upstream docstring stated trial_type 1 = left hand,
    which contradicts the mapping below — confirm against the source data.
    """
    # (trial_type, value) -> encoded event id
    pair_to_id = {
        (2, 2): 1,  # left_hand, MI stage
        (1, 2): 2,  # right_hand, MI stage
    }
    id_to_label = {1: "left_hand", 2: "right_hand"}

    if self.instr_events:
        # Instruction stage (value 1) for either hand -> id 3.
        pair_to_id[(1, 1)] = 3
        pair_to_id[(2, 1)] = 3
        id_to_label[3] = "instr"
    if self.break_events:
        # Break stage (value 3) for either hand -> id 4.
        pair_to_id[(1, 3)] = 4
        pair_to_id[(2, 3)] = 4
        id_to_label[4] = "break"

    def _pair(r):
        return (r["trial_type"], r["value"])

    # Keep only rows whose (trial_type, value) pair has an encoding.
    events_df = events_df[events_df.apply(lambda r: _pair(r) in pair_to_id, axis=1)]

    # Translate each remaining row into its encoded event id.
    event_category = events_df.apply(lambda r: pair_to_id[_pair(r)], axis=1)

    return event_category, id_to_label

def _get_single_subject_data(self, subject):
    """Return the data of a single subject.

    Loads the subject's EDF recording, fixes channel names and types,
    attaches the montage and event annotations, and wraps the resulting
    Raw object in the MOABB session/run dictionary layout.

    Parameters
    ----------
    subject : int
        The subject number to fetch data for.

    Returns
    -------
    dict
        ``{"0": {"0": raw}}`` — a single session containing a single run.
    """

    file_path_list = self.data_path(subject)[0]
    path_electrodes, path_events = self.data_infos()

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Read the subject's raw data; stim_channel="" selects the unnamed
        # channel as the stimulus channel (it is renamed to "STI" below).
        raw = mne.io.read_raw_edf(
            file_path_list, verbose=False, infer_types=True, stim_channel=""
        )

    # Dropping reference channel with constant values
    raw = raw.drop_channels(["CPz"])

    # Rename the mislabelled EOG channel and the unnamed stimulus channel.
    # NOTE(review): "HEOR" -> "VEOR" implies the channel is mislabelled in
    # the source files — confirm against the dataset documentation.
    raw.rename_channels({"HEOR": "VEOR", "": "STI"})

    # Create a dictionary with the channel names and their new types
    mapping = {"STI": "stim", "VEOR": "eog", "HEOL": "eog"}

    # Set the new channel types
    raw.set_channel_types(mapping)

    # Ensure the electrodes file carries a .tsv extension before parsing
    path_electrodes = self._normalize_extension(path_electrodes)
    # Read and set the montage
    montage = read_custom_montage(path_electrodes)

    # BIDS-style events table (tab-separated)
    events_df = pd.read_csv(path_events, sep="\t")

    # Encode the events (re-binds `mapping` to the event_id -> label dict)
    event_category, mapping = self.encoding(events_df=events_df)

    events = self.create_event_array(raw=raw, event_category=event_category)

    # Creating and setting annotations from the events
    annotations = mne.annotations_from_events(
        events, sfreq=raw.info["sfreq"], event_desc=mapping
    )

    raw = raw.set_annotations(annotations)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Removing the stimulus channel now that annotations carry the events
        raw = raw.pick(["eeg", "eog"])
        # Setting the montage
        raw = raw.set_montage(montage, verbose=False)
        # Loading dataset into memory
        raw = raw.load_data(verbose=False)
    # There is only one session (and one run)
    sessions = {"0": {"0": raw}}

    return sessions

def data_infos(self):
    """Download and return paths to the shared metadata files.

    Fetches the electrode-location file and the events file (shared by
    all subjects) from their respective URLs.

    Returns
    -------
    tuple
        ``(path_electrodes, path_events)`` — local file paths to the
        electrode and event information files.
    """
    path_to_electrodes = dl.data_dl(LIU2024_ELECTRODES, self.code)
    path_to_events = dl.data_dl(LIU2024_EVENTS, self.code)

    return path_to_electrodes, path_to_events

@staticmethod
def _normalize_extension(file_name: str) -> str:
# Renaming the .tsv file to make sure it's recognized as .tsv
# Check if the file already has the ".tsv" extension

file_electrodes_tsv = file_name + ".tsv"

if not os.path.exists(file_electrodes_tsv):
# Perform the rename operation only if the target file
# doesn't exist
shutil.copy(file_name, file_electrodes_tsv)

return file_electrodes_tsv

@staticmethod
def create_event_array(raw: Any, event_category: np.ndarray) -> np.ndarray:
"""
This method creates an event array based on the stimulus channel.
Parameters
----------
raw : mne.io.Raw
The raw data.
event_category : np.ndarray
The event categories.
Returns
-------
events : np.ndarray
The created events array.
"""
_, idx_trigger = np.nonzero(raw.copy().pick("STI").get_data())
n_label_stim = len(event_category)
# Create the events array based on the stimulus channel
events = np.column_stack(
(idx_trigger[:n_label_stim], np.zeros_like(event_category), event_category)
)
return events
9 changes: 0 additions & 9 deletions moabb/datasets/mpi_mi.py
Original file line number Diff line number Diff line change
@@ -15,15 +15,6 @@
class GrosseWentrup2009(BaseDataset):
"""Munich Motor Imagery dataset.
.. admonition:: Dataset summary
================= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
================= ======= ======= ========== ================= ============ =============== ===========
GrosseWentrup2009 10 128 2 150 7s 500Hz 1
================= ======= ======= ========== ================= ============ =============== ===========
Motor imagery dataset from Grosse-Wentrup et al. 2009 [1]_.
A trial started with the central display of a white fixation cross. After 3
9 changes: 0 additions & 9 deletions moabb/datasets/neiry.py
Original file line number Diff line number Diff line change
@@ -15,15 +15,6 @@ class DemonsP300(BaseDataset):
"""Visual P300 dataset recorded in Virtual Reality (VR) game Raccoons
versus Demons.
.. admonition:: Dataset summary
========== ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
========== ======= ======= ================= =============== =============== ===========
DemonsP300 60 8 935 NT / 50 T 1s 500Hz 1
========== ======= ======= ================= =============== =============== ===========
.. danger::
This dataset contains major unresolved issues and could be removed in the near future. Use it in a benchmark
9 changes: 0 additions & 9 deletions moabb/datasets/phmd_ml.py
Original file line number Diff line number Diff line change
@@ -17,15 +17,6 @@
class Cattan2019_PHMD(BaseDataset):
"""Passive Head Mounted Display with Music Listening dataset.
.. admonition:: Dataset summary
============== ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Blocks/class Trials len Sampling rate #Sessions
=============== ======= ======= ========== ================= ============ =============== ===========
Cattan2019_PHMD 12 16 2 10 60s 512Hz 1
=============== ======= ======= ========== ================= ============ =============== ===========
We describe the experimental procedures for a dataset that we have made publicly available
at https://doi.org/10.5281/zenodo.2617084 in mat (Mathworks, Natick, USA) and csv formats.
This dataset contains electroencephalographic recordings of 12 subjects listening to music
9 changes: 0 additions & 9 deletions moabb/datasets/physionet_mi.py
Original file line number Diff line number Diff line change
@@ -14,15 +14,6 @@
class PhysionetMI(BaseDataset):
"""Physionet Motor Imagery dataset.
.. admonition:: Dataset summary
=========== ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
=========== ======= ======= ========== ================= ============ =============== ===========
PhysionetMI 109 64 4 23 3s 160Hz 1
=========== ======= ======= ========== ================= ============ =============== ===========
Physionet MI dataset: https://physionet.org/pn4/eegmmidb/
This data set consists of over 1500 one- and two-minute EEG recordings,
9 changes: 0 additions & 9 deletions moabb/datasets/schirrmeister2017.py
Original file line number Diff line number Diff line change
@@ -17,15 +17,6 @@
class Schirrmeister2017(BaseDataset):
"""High-gamma dataset described in Schirrmeister et al. 2017.
.. admonition:: Dataset summary
================= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
================= ======= ======= ========== ================= ============ =============== ===========
Schirrmeister2017 14 128 4 120 4s 500Hz 1
================= ======= ======= ========== ================= ============ =============== ===========
Dataset from [1]_
Our “High-Gamma Dataset” is a 128-electrode dataset (of which we later only use
9 changes: 0 additions & 9 deletions moabb/datasets/sosulski2019.py
Original file line number Diff line number Diff line change
@@ -19,15 +19,6 @@ class Sosulski2019(BaseDataset):
Dataset [1]_, study on spatial transfer between SOAs [2]_, actual paradigm / online optimization [3]_.
.. admonition:: Dataset summary
============= ======= ======= ================= =============== =============== ===========
Name #Subj #Chan #Trials / class Trials length Sampling rate #Sessions
============= ======= ======= ================= =============== =============== ===========
Sosulski2019 13 31 7500 NT / 1500 T 1.2s 1000Hz 1
============= ======= ======= ================= =============== =============== ===========
**Dataset description**
This dataset contains multiple small trials of an auditory oddball paradigm. The paradigm presented two different
sinusoidal tones. A low-pitched (500 Hz, 40 ms duration) non-target tone and a high-pitched (1000 Hz,
9 changes: 0 additions & 9 deletions moabb/datasets/ssvep_exo.py
Original file line number Diff line number Diff line change
@@ -15,15 +15,6 @@
class Kalunga2016(BaseDataset):
"""SSVEP Exo dataset.
.. admonition:: Dataset summary
=========== ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
=========== ======= ======= ========== ================= =============== =============== ===========
Kalunga2016 12 8 4 16 2s 256Hz 1
=========== ======= ======= ========== ================= =============== =============== ===========
SSVEP dataset from E. Kalunga PhD in University of Versailles [1]_.
The datasets contains recording from 12 male and female subjects aged
27 changes: 0 additions & 27 deletions moabb/datasets/ssvep_mamem.py
Original file line number Diff line number Diff line change
@@ -170,15 +170,6 @@ def data_path(
class MAMEM1(BaseMAMEM):
"""SSVEP MAMEM 1 dataset.
.. admonition:: Dataset summary
====== ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
====== ======= ======= ========== ================= =============== =============== ===========
MAMEM1 10 256 5 12-15 3s 250Hz 1
====== ======= ======= ========== ================= =============== =============== ===========
Dataset from [1]_.
EEG signals with 256 channels captured from 11 subjects executing a
@@ -290,15 +281,6 @@ def __init__(self):
class MAMEM2(BaseMAMEM):
"""SSVEP MAMEM 2 dataset.
.. admonition:: Dataset summary
====== ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
====== ======= ======= ========== ================= =============== =============== ===========
MAMEM2 10 256 5 20-30 3s 250Hz 1
====== ======= ======= ========== ================= =============== =============== ===========
Dataset from [1]_.
EEG signals with 256 channels captured from 11 subjects executing a
@@ -383,15 +365,6 @@ def __init__(self):
class MAMEM3(BaseMAMEM):
"""SSVEP MAMEM 3 dataset.
.. admonition:: Dataset summary
====== ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
====== ======= ======= ========== ================= =============== =============== ===========
MAMEM3 10 14 4 20-30 3s 128Hz 1
====== ======= ======= ========== ================= =============== =============== ===========
Dataset from [1]_.
EEG signals with 14 channels captured from 11 subjects executing a
9 changes: 0 additions & 9 deletions moabb/datasets/ssvep_nakanishi.py
Original file line number Diff line number Diff line change
@@ -20,15 +20,6 @@
class Nakanishi2015(BaseDataset):
"""SSVEP Nakanishi 2015 dataset.
.. admonition:: Dataset summary
============= ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
============= ======= ======= ========== ================= =============== =============== ===========
Nakanishi2015 9 8 12 15 4.15s 256Hz 1
============= ======= ======= ========== ================= =============== =============== ===========
This dataset contains 12-class joint frequency-phase modulated steady-state
visual evoked potentials (SSVEPs) acquired from 10 subjects used to
estimate an online performance of brain-computer interface (BCI) in the
9 changes: 0 additions & 9 deletions moabb/datasets/ssvep_wang.py
Original file line number Diff line number Diff line change
@@ -24,15 +24,6 @@
class Wang2016(BaseDataset):
"""SSVEP Wang 2016 dataset.
.. admonition:: Dataset summary
======== ======= ======= ========== ================= =============== =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials length Sampling rate #Sessions
======== ======= ======= ========== ================= =============== =============== ===========
Wang2016 34 62 40 6 5s 250Hz 1
======== ======= ======= ========== ================= =============== =============== ===========
Dataset from [1]_.
This dataset gathered SSVEP-BCI recordings of 35 healthy subjects (17
22 changes: 9 additions & 13 deletions moabb/datasets/stieger2021.py
Original file line number Diff line number Diff line change
@@ -24,15 +24,6 @@
class Stieger2021(BaseDataset):
"""Motor Imagery dataset from Stieger et al. 2021.
.. admonition:: Dataset summary
============= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
============= ======= ======= ========== ================= ============ =============== ===========
Stieger2021 62 64 4 450 3s 1000Hz 10
============= ======= ======= ========== ================= ============ =============== ===========
The main goals of our original study were to characterize how individuals
learn to control SMR-BCIs and to test whether this learning can be improved
through behavioral interventions such as mindfulness training. Participants
@@ -236,8 +227,9 @@ def _get_single_subject_data(self, subject):
and (container.TrialData[i].triallength + 2) > self.interval[1]
):
# this should be the cue time-point
if container.time[i][2 * srate] == 0:
raise ValueError("This should be the cue time-point,")
assert (
container.time[i][2 * srate] == 0
), "This should be the cue time-point"
stim[2 * srate] = y
X_flat.append(x)
stim_flat.append(stim[None, :])
@@ -272,7 +264,11 @@ def _get_single_subject_data(self, subject):
badchanidxs = []

for idx in badchanidxs:
used_channels = ch_names if self.channels is None else self.channels
used_channels = (
ch_names
if (not hasattr(self, "channels") or self.channels is None)
else self.channels
)
if eeg_ch_names[idx - 1] in used_channels:
raw.info["bads"].append(eeg_ch_names[idx - 1])

@@ -285,5 +281,5 @@ def _get_single_subject_data(self, subject):
bad_info=raw.info["bads"],
)

subject_data[session] = {"run_0": raw}
subject_data[str(session)] = {"0": raw}
return subject_data
7 changes: 7 additions & 0 deletions moabb/datasets/summary_cvep.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
Dataset, #Subj, #Sessions, Sampling rate, #Chan, Trials length, #Trial classes, #Trials / class, #Epochs classes, #Epochs / class, Codes, Presentation rate, PapersWithCode leaderboard
Thielen2015,12,1,2048Hz,64,4.2s,36,3,2,27216 NT / 27216 T,Gold codes,120Hz,
Thielen2021,30,1,512Hz,8,31.5s,20,5,2,18900 NT / 18900 T,Gold codes,60Hz,
CastillosCVEP100, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,3525 NT / 3495 T,m-sequence,60Hz,
CastillosCVEP40, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,3525 NT / 3495 T,m-sequence,60Hz,
CastillosBurstVEP40, 12,1,500Hz,32,2.2s,4,15/15/15/15,2,5820 NT / 1200 T,Burst-CVEP,60Hz,
CastillosBurstVEP100,12,1,500Hz,32,2.2s,4,15/15/15/15,2,5820 NT / 1200 T,Burst-CVEP,60Hz,
19 changes: 19 additions & 0 deletions moabb/datasets/summary_imagery.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
Dataset, #Subj, #Chan, #Classes, #Trials, Trial length, Freq, #Session, #Runs, Total_trials, PapersWithCode leaderboard
AlexMI,8,16,3,20,3s,512Hz,1,1,480,https://paperswithcode.com/dataset/alexandremotorimagery-moabb
BNCI2014_001,9,22,4,144,4s,250Hz,2,6,62208,https://paperswithcode.com/dataset/bnci2014-001-moabb-1
BNCI2014_002,14,15,2,80,5s,512Hz,1,8,17920,https://paperswithcode.com/dataset/bnci2014-002-moabb-1
BNCI2014_004,9,3,2,360,4.5s,250Hz,5,1,32400,https://paperswithcode.com/dataset/bnci2014-004-moabb-1
BNCI2015_001,12,13,2,200,5s,512Hz,3,1,14400,https://paperswithcode.com/dataset/bnci2015-001-moabb-1
BNCI2015_004,9,30,5,80,7s,256Hz,2,1,7200,https://paperswithcode.com/dataset/bnci2015-004-moabb-1
Cho2017,52,64,2,100,3s,512Hz,1,1,9800,https://paperswithcode.com/dataset/cho2017-moabb
Lee2019_MI,54,62,2,100,4s,1000Hz,2,1,11000,https://paperswithcode.com/dataset/lee2019-mi-moabb-1
GrosseWentrup2009,10,128,2,150,7s,500Hz,1,1,3000,https://paperswithcode.com/dataset/grossewentrup2009-moabb
Schirrmeister2017,14,128,4,120,4s,500Hz,1,2,13440,https://paperswithcode.com/dataset/schirrmeister2017-moabb
Ofner2017,15,61,7,60,3s,512Hz,1,10,63000,
PhysionetMI,109,64,4,23,3s,160Hz,1,1,69760,https://paperswithcode.com/dataset/physionetmotorimagery-moabb
Shin2017A,29,30,2,30,10s,200Hz,3,1,5220,https://paperswithcode.com/dataset/shin2017a-moabb
Shin2017B,29,30,2,30,10s,200Hz,3,1,5220,
Weibo2014,10,60,7,80,4s,200Hz,1,1,5600,https://paperswithcode.com/dataset/weibo2014-moabb
Zhou2016,4,14,3,160,5s,250Hz,3,2,11496,https://paperswithcode.com/dataset/zhou2016-moabb
Stieger2021,62,64,4,450,3s,1000Hz,7 or 11,1,250000,
Liu2024,50,29,2,20,4s,500Hz,1,1,2000,
17 changes: 17 additions & 0 deletions moabb/datasets/summary_p300.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
Dataset, #Subj, #Chan, #Trials / class, Trials length, Sampling rate, #Sessions, PapersWithCode leaderboard
BNCI2014_008, 8, 8, 3500 NT / 700 T, 1s, 256Hz, 1,https://paperswithcode.com/dataset/bnci2014-008-moabb-1
BNCI2014_009, 10, 16, 1440 NT / 288 T, 0.8s, 256Hz, 3,https://paperswithcode.com/dataset/bnci2014-009-moabb-1
BNCI2015_003, 10, 8, 1500 NT / 300 T, 0.8s, 256Hz, 1,https://paperswithcode.com/dataset/bnci2015-003-moabb-1
BI2012, 25, 16, 640 NT / 128 T, 1s, 128Hz, 2,https://paperswithcode.com/dataset/braininvaders2012-moabb
BI2013a, 24, 16, 3200 NT / 640 T, 1s, 512Hz, 8 for subjects 1-7 else 1,https://paperswithcode.com/dataset/braininvaders2013a-moabb
BI2014a, 64, 16, 990 NT / 198 T, 1s, 512Hz, up to 3,https://paperswithcode.com/dataset/braininvaders2014a-moabb
BI2014b, 38, 32, 200 NT / 40 T, 1s, 512Hz, 3,https://paperswithcode.com/dataset/braininvaders2014b-moabb
BI2015a, 43, 32, 4131 NT / 825 T, 1s, 512Hz, 3,https://paperswithcode.com/dataset/braininvaders2015a-moabb
BI2015b, 44, 32, 2160 NT / 480 T, 1s, 512Hz, 1,https://paperswithcode.com/dataset/braininvaders2015b-moabb
Cattan2019_VR, 21, 16, 600 NT / 120 T, 1s, 512Hz, 2,https://paperswithcode.com/dataset/cattan2019-vr-moabb-1
Huebner2017, 13, 31, 364 NT / 112 T, 0.9s, 1000Hz, 3,https://paperswithcode.com/dataset/huebner2017-moabb
Huebner2018, 12, 31, 364 NT / 112 T, 0.9s, 1000Hz, 3,https://paperswithcode.com/dataset/huebner2018-moabb
Sosulski2019, 13, 31, 7500 NT / 1500 T, 1.2s, 1000Hz, 1,https://paperswithcode.com/dataset/sosulski2019-moabb
EPFLP300, 8, 32, 2753 NT / 551 T, 1s, 2048Hz, 4,https://paperswithcode.com/dataset/epflp300-moabb
Lee2019_ERP, 54, 62, 6900 NT / 1380 T, 1s, 1000Hz, 2,https://paperswithcode.com/dataset/lee2019-erp-moabb-1
DemonsP300, 60, 8, 935 NT / 50 T, 1s, 500Hz, 1,
4 changes: 4 additions & 0 deletions moabb/datasets/summary_rstate.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
Dataset, #Subj, #Chan, #Classes, #Blocks / class, Trials length, Sampling rate, #Sessions, PapersWithCode leaderboard
Cattan2019_PHMD,12,16,2,10,60s,512Hz,1,
Hinss2021,15,62,4,1,2s,250Hz,1,
Rodrigues2017,20,16,2,5,10s,512Hz,1,
8 changes: 8 additions & 0 deletions moabb/datasets/summary_ssvep.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
Dataset, #Subj, #Chan, #Classes, #Trials / class, Trials length, Sampling rate, #Sessions, PapersWithCode leaderboard
Lee2019_SSVEP,54,62,4,50,4s,1000Hz,2,https://paperswithcode.com/dataset/lee2019-ssvep-moabb-1
Kalunga2016,12,8,4,16,2s,256Hz,1,https://paperswithcode.com/dataset/kalunga2016-moabb
MAMEM1,10,256,5,12-15,3s,250Hz,1,https://paperswithcode.com/dataset/mamem1-moabb
MAMEM2,10,256,5,20-30,3s,250Hz,1,https://paperswithcode.com/dataset/mamem2-moabb
MAMEM3,10,14,4,20-30,3s,128Hz,1,https://paperswithcode.com/dataset/mamem3-moabb
Nakanishi2015,9,8,12,15,4.15s,256Hz,1,https://paperswithcode.com/dataset/nakanishi2015-moabb
Wang2016,34,62,40,6,5s,250Hz,1,https://paperswithcode.com/dataset/wang2016-moabb
9 changes: 0 additions & 9 deletions moabb/datasets/thielen2015.py
Original file line number Diff line number Diff line change
@@ -24,15 +24,6 @@ class Thielen2015(BaseDataset):
Dataset [1]_ from the study on reconvolution for c-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
Thielen2015 12 1 2048Hz 64 4.2s 36 3 2 27216 NT / 27216 T Gold codes 120Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
**Dataset description**
EEG recordings were obtained with a sampling rate of 2048 Hz, using a setup comprising 64 Ag/AgCl electrodes, and
8 changes: 0 additions & 8 deletions moabb/datasets/thielen2021.py
Original file line number Diff line number Diff line change
@@ -74,14 +74,6 @@ class Thielen2021(BaseDataset):
Dataset [1]_ from the study on zero-training c-VEP [2]_.
.. admonition:: Dataset summary
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
Name #Subj #Sessions Sampling rate #Chan Trials length #Trial classes #Trials / class #Epoch classes #Epochs / class Codes Presentation rate
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
Thielen2021 30 1 512Hz 8 31.5s 20 5 2 94500 NT / 94500 T Gold codes 60Hz
==================== ======= ========= ============= ===== ============= ============== =============== ============== ================== ========== =================
**Dataset description**
EEG recordings were acquired at a sampling rate of 512 Hz, employing 8 Ag/AgCl electrodes. The Biosemi ActiveTwo EEG
9 changes: 0 additions & 9 deletions moabb/datasets/upper_limb.py
Original file line number Diff line number Diff line change
@@ -13,15 +13,6 @@
class Ofner2017(BaseDataset):
"""Motor Imagery ataset from Ofner et al 2017.
.. admonition:: Dataset summary
========= ======= ======= ========== ================= ============ =============== ===========
Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions
========= ======= ======= ========== ================= ============ =============== ===========
Ofner2017 15 61 7 60 3s 512Hz 1
========= ======= ======= ========== ================= ============ =============== ===========
Upper limb Motor imagery dataset from the paper [1]_.
**Dataset description**
47 changes: 45 additions & 2 deletions moabb/evaluations/base.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,32 @@
import logging
from abc import ABC, abstractmethod
from warnings import warn

import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV

from moabb.analysis import Results
from moabb.datasets.base import BaseDataset
from moabb.evaluations.utils import _convert_sklearn_params_to_optuna
from moabb.paradigms.base import BaseParadigm


log = logging.getLogger(__name__)

# Making the optuna soft dependency
try:
from optuna.integration import OptunaSearchCV

optuna_available = True
except ImportError:
optuna_available = False

if optuna_available:
search_methods = {"grid": GridSearchCV, "optuna": OptunaSearchCV}
else:
search_methods = {"grid": GridSearchCV}


class BaseEvaluation(ABC):
"""Base class that defines necessary operations for an evaluation.
@@ -53,11 +68,19 @@ class BaseEvaluation(ABC):
Save model after training, for each fold of cross-validation if needed
cache_config: bool, default=None
Configuration for caching of datasets. See :class:`moabb.datasets.base.CacheConfig` for details.
optuna:bool, default=False
If optuna is enabled, it will change the GridSearch to a RandomizedGridSearch with 15 minutes of cut-off time.
This option is compatible with list of entries of type None, bool, int, float and string
time_out: default=60*15
Cut off time for the optuna search expressed in seconds, the default value is 15 minutes.
Only used with optuna equal to True.
Notes
-----
.. versionadded:: 1.1.0
n_splits, save_model, cache_config parameters.
.. versionadded:: 1.1.1
optuna, time_out parameters.
"""

def __init__(
@@ -77,6 +100,8 @@ def __init__(
n_splits=None,
save_model=False,
cache_config=None,
optuna=False,
time_out=60 * 15,
):
self.random_state = random_state
self.n_jobs = n_jobs
@@ -88,6 +113,16 @@ def __init__(
self.n_splits = n_splits
self.save_model = save_model
self.cache_config = cache_config
self.optuna = optuna
self.time_out = time_out

if self.optuna and not optuna_available:
raise ImportError("Optuna is not available. Please install it first.")
if (self.time_out != 60 * 15) and not self.optuna:
warn(
"time_out parameter is only used when optuna is enabled. "
"Ignoring time_out parameter."
)
# check paradigm
if not isinstance(paradigm, BaseParadigm):
raise (ValueError("paradigm must be an Paradigm instance"))
@@ -261,19 +296,27 @@ def is_valid(self, dataset):
"""

def _grid_search(self, param_grid, name, grid_clf, inner_cv):
extra_params = {}
if param_grid is not None:
if name in param_grid:
search = GridSearchCV(
if self.optuna:
search = search_methods["optuna"]
param_grid[name] = _convert_sklearn_params_to_optuna(param_grid[name])
extra_params["timeout"] = self.time_out
else:
search = search_methods["grid"]

search = search(
grid_clf,
param_grid[name],
refit=True,
cv=inner_cv,
n_jobs=self.n_jobs,
scoring=self.paradigm.scoring,
return_train_score=True,
**extra_params,
)
return search

else:
return grid_clf

43 changes: 42 additions & 1 deletion moabb/evaluations/utils.py
Original file line number Diff line number Diff line change
@@ -10,6 +10,14 @@
from sklearn.pipeline import Pipeline


try:
from optuna.distributions import CategoricalDistribution

optuna_available = True
except ImportError:
optuna_available = False


def _check_if_is_keras_model(model):
"""Check if the model is a Keras model.
@@ -214,7 +222,7 @@ def create_save_path(
return str(path_save)
else:
print("No hdf5_path provided, models will not be saved.")


def sort_group(groups):
runs_sort = []
@@ -225,3 +233,36 @@ def sort_group(groups):
runs_sort.append(index)
sorted_ix = np.argsort(runs_sort)
return groups[sorted_ix]


def _convert_sklearn_params_to_optuna(param_grid: dict) -> dict:
"""
Function to convert the parameter in Optuna format. This function will
create a categorical distribution of values from the list of values
provided in the parameter grid.
Parameters
----------
param_grid:
Dictionary with the parameters to be converted.
Returns
-------
optuna_params: dict
Dictionary with the parameters converted to Optuna format.
"""
if not optuna_available:
raise ImportError(
"Optuna is not available. Please install it optuna " "and optuna-integration."
)
else:
optuna_params = {}
for key, value in param_grid.items():
try:
if isinstance(value, list):
optuna_params[key] = CategoricalDistribution(value)
else:
optuna_params[key] = value
except Exception as e:
raise ValueError(f"Conversion failed for parameter {key}: {e}")
return optuna_params
10 changes: 10 additions & 0 deletions moabb/tests/benchmark.py
Original file line number Diff line number Diff line change
@@ -72,6 +72,16 @@ def test_include_exclude(self):
overwrite=True,
)

def test_optuna(self):
res = benchmark(
pipelines=str(self.pp_dir),
evaluations=["WithinSession"],
paradigms=["FakeImageryParadigm"],
overwrite=True,
optuna=True,
)
self.assertEqual(len(res), 40)


if __name__ == "__main__":
unittest.main()
29 changes: 28 additions & 1 deletion moabb/tests/datasets.py
Original file line number Diff line number Diff line change
@@ -11,7 +11,12 @@
import moabb.datasets as db
import moabb.datasets.compound_dataset as db_compound
from moabb.datasets import BNCI2014_001, Cattan2019_VR, Shin2017A, Shin2017B
from moabb.datasets.base import BaseDataset, is_abbrev, is_camel_kebab_case
from moabb.datasets.base import (
BaseDataset,
_summary_table,
is_abbrev,
is_camel_kebab_case,
)
from moabb.datasets.compound_dataset import CompoundDataset
from moabb.datasets.compound_dataset.utils import compound_dataset_list
from moabb.datasets.fake import FakeDataset, FakeVirtualRealityDataset
@@ -251,6 +256,28 @@ def test_depreciated_datasets_init(self):
self.assertIsNotNone(obj)
self.assertIn(ds.__name__, depreciated_names)

def test_dataset_docstring_table(self):
# The dataset summary table will be automatically added to the docstring of
# all the datasets listed in the moabb/datasets/summary_*.csv files.
depreciated_names, _, _ = zip(*aliases_list)
for ds in dataset_list:
if "Fake" in ds.__name__:
continue
if ds.__name__ in depreciated_names:
continue
self.assertIn(".. admonition:: Dataset summary", ds.__doc__)

def test_completeness_summary_table(self):
# The dataset summary table will be automatically added to the docstring of
# all the datasets listed in the moabb/datasets/summary_*.csv files.
depreciated_names, _, _ = zip(*aliases_list)
for ds in dataset_list:
if "Fake" in ds.__name__:
continue
if ds.__name__ in depreciated_names:
continue
self.assertIn(ds.__name__, _summary_table.index)

def test_dataset_list(self):
if aliases_list:
depreciated_list, _, _ = zip(*aliases_list)
25 changes: 25 additions & 0 deletions moabb/tests/evaluations.py
Original file line number Diff line number Diff line change
@@ -58,6 +58,7 @@ def setUp(self):
datasets=[dataset],
hdf5_path="res_test",
save_model=True,
optuna=False,
)

def test_mne_labels(self):
@@ -138,6 +139,30 @@ def test_eval_grid_search(self):
# We should have 9 columns in the results data frame
self.assertEqual(len(results[0].keys()), 9 if _carbonfootprint else 8)

def test_eval_grid_search_optuna(self):
# Test grid search
param_grid = {"C": {"csp__metric": ["euclid", "riemann"]}}
process_pipeline = self.eval.paradigm.make_process_pipelines(dataset)[0]

self.eval.optuna = True

results = [
r
for r in self.eval.evaluate(
dataset,
pipelines,
param_grid=param_grid,
process_pipeline=process_pipeline,
)
]

self.eval.optuna = False

# We should get 4 results, 2 sessions 2 subjects
self.assertEqual(len(results), 4)
# We should have 9 columns in the results data frame
self.assertEqual(len(results[0].keys()), 9 if _carbonfootprint else 8)

def test_within_session_evaluation_save_model(self):
res_test_path = "./res_test"

Loading

0 comments on commit e6661c4

Please sign in to comment.