diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2ba7f294c66..cb769988655 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
repos:
# Ruff mne
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.8.6
+ rev: v0.9.1
hooks:
- id: ruff
name: ruff lint mne
@@ -82,7 +82,7 @@ repos:
# zizmor
- repo: https://github.com/woodruffw/zizmor-pre-commit
- rev: v1.0.0
+ rev: v1.1.1
hooks:
- id: zizmor
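These rev bumps track new releases of the lint hooks; ruff 0.9 in particular stabilized a formatting style that rewrites f-strings to prefer double outer quotes with single quotes inside the braces, which is what most of the hunks below apply mechanically. A minimal sketch of that style change (illustrative values, not code from this repo):

```python
# Illustrative only — not code from this repo.
info = {"sfreq": 1000.0}

# ruff 0.8 style: single outer quotes so the inner double quotes differ
old = f'sfreq: {info["sfreq"]} Hz'
# ruff 0.9 style: double outer quotes, single quotes inside the braces
new = f"sfreq: {info['sfreq']} Hz"
assert old == new  # behavior is identical; only the quoting changes
```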
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index d0aa9ea9e76..7149edac50b 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -112,7 +112,7 @@ stages:
- bash: |
set -e
python -m pip install --progress-bar off --upgrade pip
- python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn python-picard qtpy nibabel sphinx-gallery "PySide6!=6.8.0,!=6.8.0.1" pandas neo pymatreader antio defusedxml
+ python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn python-picard qtpy nibabel sphinx-gallery "PySide6!=6.8.0,!=6.8.0.1,!=6.8.1.1" pandas neo pymatreader antio defusedxml
python -m pip uninstall -yq mne
python -m pip install --progress-bar off --upgrade -e .[test]
displayName: 'Install dependencies with pip'
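The added `!=6.8.1.1` entry extends the PySide6 exclusion list; each `!=` clause is a PEP 440 specifier and the commas join them with AND. A small sketch using the third-party `packaging` library (an assumption — the pipeline itself only hands the string to pip):

```python
# Sketch, assuming the `packaging` library is installed.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("!=6.8.0,!=6.8.0.1,!=6.8.1.1")
assert "6.8.1" in spec        # ordinary releases still install
assert "6.8.1.1" not in spec  # the newly excluded broken release
```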
diff --git a/doc/changes/devel/13063.bugfix.rst b/doc/changes/devel/13063.bugfix.rst
new file mode 100644
index 00000000000..76eba2032a1
--- /dev/null
+++ b/doc/changes/devel/13063.bugfix.rst
@@ -0,0 +1 @@
+Fix bug in the colorbars created by :func:`mne.viz.plot_evoked_topomap` by `Santeri Ruuskanen`_.
\ No newline at end of file
diff --git a/doc/changes/devel/13069.bugfix.rst b/doc/changes/devel/13069.bugfix.rst
new file mode 100644
index 00000000000..7c23221c8df
--- /dev/null
+++ b/doc/changes/devel/13069.bugfix.rst
@@ -0,0 +1 @@
+Fix bug caused by an unnecessary assertion when loading mixed-frequency EDFs without preloading in :func:`mne.io.read_raw_edf` by `Simon Kern`_.
\ No newline at end of file
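The failure mode this entry describes, sketched with a hypothetical file path (the over-strict assertion used to fire inside the lazy read path; see the edf.py hunk further down):

```python
# Hypothetical file path — a minimal sketch of the fixed scenario.
import mne

raw = mne.io.read_raw_edf("mixed_sfreq.edf", preload=False)  # lazy reader
# Requesting only a channel sampled below the file's maximum sfreq used to
# trip an assertion during the on-demand read; it is now upsampled instead.
data = raw.get_data(picks=[raw.ch_names[0]])
```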
diff --git a/doc/conf.py b/doc/conf.py
index 96028fb9045..74f66d8f6ae 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1289,7 +1289,7 @@ def fix_sklearn_inherited_docstrings(app, what, name, obj, options, lines):
rst_prolog += f"""
.. |{icon}| raw:: html
-
+
"""
rst_prolog += """
diff --git a/doc/sphinxext/credit_tools.py b/doc/sphinxext/credit_tools.py
index 708dcf00ce8..e22bd0b5530 100644
--- a/doc/sphinxext/credit_tools.py
+++ b/doc/sphinxext/credit_tools.py
@@ -169,7 +169,7 @@ def generate_credit_rst(app=None, *, verbose=False):
if author["e"] is not None:
if author["e"] not in name_map:
unknown_emails.add(
- f'{author["e"].ljust(29)} '
+ f"{author['e'].ljust(29)} "
"https://github.com/mne-tools/mne-python/pull/"
f"{commit}/files"
)
@@ -178,9 +178,9 @@ def generate_credit_rst(app=None, *, verbose=False):
else:
name = author["n"]
if name in manual_renames:
- assert _good_name(
- manual_renames[name]
- ), f"Bad manual rename: {name}"
+ assert _good_name(manual_renames[name]), (
+ f"Bad manual rename: {name}"
+ )
name = manual_renames[name]
if " " in name:
first, last = name.rsplit(" ", maxsplit=1)
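This hunk (and the related_software.py one below) shows the other mechanical ruff 0.9 change: long `assert` statements keep the condition on one line and wrap the message in parentheses instead of splitting the condition. A standalone sketch of the new layout:

```python
# Minimal sketch of the assert layout ruff 0.9 produces for long lines.
name = "Some Contributor With A Deliberately Long Byline"
assert len(name) < 120, (
    f"Bad manual rename: {name}"  # message wrapped, condition kept intact
)
```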
diff --git a/doc/sphinxext/related_software.py b/doc/sphinxext/related_software.py
index ac1b741b9af..ab159b0fcb4 100644
--- a/doc/sphinxext/related_software.py
+++ b/doc/sphinxext/related_software.py
@@ -163,9 +163,9 @@ def _get_packages() -> dict[str, str]:
assert not dups, f"Duplicates in MANUAL_PACKAGES and PYPI_PACKAGES: {sorted(dups)}"
# And the installer and PyPI-only should be disjoint:
dups = set(PYPI_PACKAGES) & set(packages)
- assert (
- not dups
- ), f"Duplicates in PYPI_PACKAGES and installer packages: {sorted(dups)}"
+ assert not dups, (
+ f"Duplicates in PYPI_PACKAGES and installer packages: {sorted(dups)}"
+ )
for name in PYPI_PACKAGES | set(MANUAL_PACKAGES):
if name not in packages:
packages.append(name)
diff --git a/doc/sphinxext/unit_role.py b/doc/sphinxext/unit_role.py
index b52665e8321..bf31ddf76c4 100644
--- a/doc/sphinxext/unit_role.py
+++ b/doc/sphinxext/unit_role.py
@@ -10,8 +10,7 @@ def unit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): #
def pass_error_to_sphinx(rawtext, text, lineno, inliner):
msg = inliner.reporter.error(
- "The :unit: role requires a space-separated number and unit; "
- f"got {text}",
+ f"The :unit: role requires a space-separated number and unit; got {text}",
line=lineno,
)
prb = inliner.problematic(rawtext, rawtext, msg)
diff --git a/examples/inverse/vector_mne_solution.py b/examples/inverse/vector_mne_solution.py
index ca953cd2f24..f6ae788c145 100644
--- a/examples/inverse/vector_mne_solution.py
+++ b/examples/inverse/vector_mne_solution.py
@@ -79,7 +79,7 @@
# inverse was computed with loose=0.2
print(
"Absolute cosine similarity between source normals and directions: "
- f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}'
+ f"{np.abs(np.sum(directions * inv['source_nn'][2::3], axis=-1)).mean()}"
)
brain_max = stc_max.plot(
initial_time=peak_time,
diff --git a/examples/visualization/evoked_topomap.py b/examples/visualization/evoked_topomap.py
index 83d1916c6f9..53b7a60dbba 100644
--- a/examples/visualization/evoked_topomap.py
+++ b/examples/visualization/evoked_topomap.py
@@ -5,8 +5,8 @@
Plotting topographic maps of evoked data
========================================
-Load evoked data and plot topomaps for selected time points using multiple
-additional options.
+Load evoked data and plot topomaps for selected time points using
+multiple additional options.
"""
# Authors: Christian Brodbeck
# Tal Linzen
diff --git a/examples/visualization/evoked_whitening.py b/examples/visualization/evoked_whitening.py
index ed05ae3ba11..4bcb4bc8c04 100644
--- a/examples/visualization/evoked_whitening.py
+++ b/examples/visualization/evoked_whitening.py
@@ -85,7 +85,7 @@
print("Covariance estimates sorted from best to worst")
for c in noise_covs:
- print(f'{c["method"]} : {c["loglik"]}')
+ print(f"{c['method']} : {c['loglik']}")
# %%
# Show the evoked data:
diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py
index e55fd5d2dae..eb8b6bc396a 100644
--- a/mne/_fiff/_digitization.py
+++ b/mne/_fiff/_digitization.py
@@ -328,8 +328,7 @@ def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True):
dig_coord_frames = set([FIFF.FIFFV_COORD_HEAD])
if len(dig_coord_frames) != 1:
raise RuntimeError(
- "Only single coordinate frame in dig is supported, "
- f"got {dig_coord_frames}"
+ f"Only single coordinate frame in dig is supported, got {dig_coord_frames}"
)
dig_ch_pos_location = np.array(dig_ch_pos_location)
dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3)
diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py
index ecc93591a05..51612824a6a 100644
--- a/mne/_fiff/meas_info.py
+++ b/mne/_fiff/meas_info.py
@@ -455,7 +455,7 @@ def _check_set(ch, projs, ch_type):
for proj in projs:
if ch["ch_name"] in proj["data"]["col_names"]:
raise RuntimeError(
- f'Cannot change channel type for channel {ch["ch_name"]} in '
+ f"Cannot change channel type for channel {ch['ch_name']} in "
f'projector "{proj["desc"]}"'
)
ch["kind"] = new_kind
@@ -1867,7 +1867,7 @@ def _check_consistency(self, prepend_error=""):
):
raise RuntimeError(
f'{prepend_error}info["meas_date"] must be a datetime object in UTC'
- f' or None, got {repr(self["meas_date"])!r}'
+ f" or None, got {repr(self['meas_date'])!r}"
)
chs = [ch["ch_name"] for ch in self["chs"]]
@@ -3680,8 +3680,7 @@ def _write_ch_infos(fid, chs, reset_range, ch_names_mapping):
# only write new-style channel information if necessary
if len(ch_names_mapping):
logger.info(
- " Writing channel names to FIF truncated to 15 characters "
- "with remapping"
+ " Writing channel names to FIF truncated to 15 characters with remapping"
)
for ch in chs:
start_block(fid, FIFF.FIFFB_CH_INFO)
diff --git a/mne/_fiff/proj.py b/mne/_fiff/proj.py
index 0376826138a..d6ec108e34d 100644
--- a/mne/_fiff/proj.py
+++ b/mne/_fiff/proj.py
@@ -76,7 +76,7 @@ def __repr__(self): # noqa: D105
s += f", active : {self['active']}"
s += f", n_channels : {len(self['data']['col_names'])}"
if self["explained_var"] is not None:
- s += f', exp. var : {self["explained_var"] * 100:0.2f}%'
+ s += f", exp. var : {self['explained_var'] * 100:0.2f}%"
return f""
# speed up info copy by taking advantage of mutability
@@ -324,8 +324,7 @@ def apply_proj(self, verbose=None):
if all(p["active"] for p in self.info["projs"]):
logger.info(
- "Projections have already been applied. "
- "Setting proj attribute to True."
+ "Projections have already been applied. Setting proj attribute to True."
)
return self
@@ -663,9 +662,9 @@ def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None):
for proj in projs:
misc = "active" if proj["active"] else " idle"
logger.info(
- f' {proj["desc"]} '
- f'({proj["data"]["nrow"]} x '
- f'{len(proj["data"]["col_names"])}) {misc}'
+ f" {proj['desc']} "
+ f"({proj['data']['nrow']} x "
+ f"{len(proj['data']['col_names'])}) {misc}"
)
return projs
@@ -795,8 +794,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, inplace=False
if not p["active"] or include_active:
if len(p["data"]["col_names"]) != len(np.unique(p["data"]["col_names"])):
raise ValueError(
- f"Channel name list in projection item {k}"
- " contains duplicate items"
+ f"Channel name list in projection item {k} contains duplicate items"
)
# Get the two selection vectors to pick correct elements from
@@ -832,7 +830,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, inplace=False
)
):
warn(
- f'Projection vector {repr(p["desc"])} has been '
+ f"Projection vector {repr(p['desc'])} has been "
f"reduced to {100 * psize:0.2f}% of its "
"original magnitude by subselecting "
f"{len(vecsel)}/{orig_n} of the original "
diff --git a/mne/_fiff/reference.py b/mne/_fiff/reference.py
index e70bf5e36c1..b4c050c096d 100644
--- a/mne/_fiff/reference.py
+++ b/mne/_fiff/reference.py
@@ -102,7 +102,7 @@ def _check_before_dict_reference(inst, ref_dict):
raise TypeError(
f"{elem_name.capitalize()}s in the ref_channels dict must be strings. "
f"Your dict has {elem_name}s of type "
- f'{", ".join(map(lambda x: x.__name__, bad_elem))}.'
+ f"{', '.join(map(lambda x: x.__name__, bad_elem))}."
)
# Check that keys are valid channels and values are lists-of-valid-channels
@@ -113,8 +113,8 @@ def _check_before_dict_reference(inst, ref_dict):
for elem_name, elem in dict(key=keys, value=values).items():
if bad_elem := elem - ch_set:
raise ValueError(
- f'ref_channels dict contains invalid {elem_name}(s) '
- f'({", ".join(bad_elem)}) '
+ f"ref_channels dict contains invalid {elem_name}(s) "
+ f"({', '.join(bad_elem)}) "
"that are not names of channels in the instance."
)
# Check that values are not bad channels
diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py
index abc7d32036b..3fd36454d58 100644
--- a/mne/_fiff/tag.py
+++ b/mne/_fiff/tag.py
@@ -70,8 +70,7 @@ def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
have_shape = tag_size // item_size
if want_shape != have_shape:
raise ValueError(
- f"Wrong shape specified, requested {want_shape} but got "
- f"{have_shape}"
+ f"Wrong shape specified, requested {want_shape} but got {have_shape}"
)
if not len(rlims) == 2:
raise ValueError("rlims must have two elements")
diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py
index d088da2a4a2..a38ecaade50 100644
--- a/mne/_fiff/tests/test_meas_info.py
+++ b/mne/_fiff/tests/test_meas_info.py
@@ -1118,7 +1118,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname):
meas_info, "_read_extended_ch_info", _read_extended_ch_info
)
short_proj_names = [
- f"{name[:13 - bool(len(ref_names))]}-{ni}"
+ f"{name[: 13 - bool(len(ref_names))]}-{ni}"
for ni, name in enumerate(long_proj_names)
]
assert raw_read.info["projs"][0]["data"]["col_names"] == short_proj_names
diff --git a/mne/_fiff/tests/test_pick.py b/mne/_fiff/tests/test_pick.py
index 90830e1d5e5..5d1b24247ab 100644
--- a/mne/_fiff/tests/test_pick.py
+++ b/mne/_fiff/tests/test_pick.py
@@ -136,7 +136,7 @@ def _channel_type_old(info, idx):
else:
return t
- raise ValueError(f'Unknown channel type for {ch["ch_name"]}')
+ raise ValueError(f"Unknown channel type for {ch['ch_name']}")
def _assert_channel_types(info):
diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py
index bb947cdd757..16bedc2c317 100644
--- a/mne/beamformer/_compute_beamformer.py
+++ b/mne/beamformer/_compute_beamformer.py
@@ -507,13 +507,13 @@ def __repr__(self): # noqa: D105
n_channels,
)
if self["pick_ori"] is not None:
- out += f', {self["pick_ori"]} ori'
+ out += f", {self['pick_ori']} ori"
if self["weight_norm"] is not None:
- out += f', {self["weight_norm"]} norm'
+ out += f", {self['weight_norm']} norm"
if self.get("inversion") is not None:
- out += f', {self["inversion"]} inversion'
+ out += f", {self['inversion']} inversion"
if "rank" in self:
- out += f', rank {self["rank"]}'
+ out += f", rank {self['rank']}"
out += ">"
return out
@@ -531,7 +531,7 @@ def save(self, fname, overwrite=False, verbose=None):
"""
_, write_hdf5 = _import_h5io_funcs()
- ending = f'-{self["kind"].lower()}.h5'
+ ending = f"-{self['kind'].lower()}.h5"
check_fname(fname, self["kind"], (ending,))
csd_orig = None
try:
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index 957dbaf5284..9ae5473e190 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -380,7 +380,7 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind):
rank = 17 if proj else 20
assert "LCMV" in repr(filters)
assert "unknown subject" not in repr(filters)
- assert f'{fwd["nsource"]} vert' in repr(filters)
+ assert f"{fwd['nsource']} vert" in repr(filters)
assert "20 ch" in repr(filters)
assert f"rank {rank}" in repr(filters)
diff --git a/mne/bem.py b/mne/bem.py
index d361272fd49..22aa02d2a0d 100644
--- a/mne/bem.py
+++ b/mne/bem.py
@@ -91,7 +91,7 @@ class ConductorModel(dict):
def __repr__(self): # noqa: D105
if self["is_sphere"]:
- center = ", ".join(f"{x * 1000.:.1f}" for x in self["r0"])
+ center = ", ".join(f"{x * 1000.0:.1f}" for x in self["r0"])
rad = self.radius
if rad is None: # no radius / MEG only
extra = f"Sphere (no layers): r0=[{center}] mm"
@@ -538,7 +538,7 @@ def _assert_complete_surface(surf, incomplete="raise"):
prop = tot_angle / (2 * np.pi)
if np.abs(prop - 1.0) > 1e-5:
msg = (
- f'Surface {_bem_surf_name[surf["id"]]} is not complete (sum of '
+ f"Surface {_bem_surf_name[surf['id']]} is not complete (sum of "
f"solid angles yielded {prop}, should be 1.)"
)
_on_missing(incomplete, msg, name="incomplete", error_klass=RuntimeError)
@@ -571,7 +571,7 @@ def _check_surface_size(surf):
sizes = surf["rr"].max(axis=0) - surf["rr"].min(axis=0)
if (sizes < 0.05).any():
raise RuntimeError(
- f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too '
+ f"Dimensions of the surface {_bem_surf_name[surf['id']]} seem too "
f"small ({1000 * sizes.min():9.5f}). Maybe the unit of measure"
" is meters instead of mm"
)
@@ -599,8 +599,7 @@ def _surfaces_to_bem(
# surfs can be strings (filenames) or surface dicts
if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) == len(sigmas)):
raise ValueError(
- "surfs, ids, and sigmas must all have the same "
- "number of elements (1 or 3)"
+ "surfs, ids, and sigmas must all have the same number of elements (1 or 3)"
)
for si, surf in enumerate(surfs):
if isinstance(surf, str | Path | os.PathLike):
@@ -1260,8 +1259,7 @@ def make_watershed_bem(
if op.isdir(ws_dir):
if not overwrite:
raise RuntimeError(
- f"{ws_dir} already exists. Use the --overwrite option"
- " to recreate it."
+ f"{ws_dir} already exists. Use the --overwrite option to recreate it."
)
else:
shutil.rmtree(ws_dir)
@@ -2460,7 +2458,7 @@ def check_seghead(surf_path=subj_path / "surf"):
logger.info(f"{ii}. Creating {level} tessellation...")
logger.info(
f"{ii}.1 Decimating the dense tessellation "
- f'({len(surf["tris"])} -> {n_tri} triangles)...'
+ f"({len(surf['tris'])} -> {n_tri} triangles)..."
)
points, tris = decimate_surface(
points=surf["rr"], triangles=surf["tris"], n_triangles=n_tri
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
index ed6dd8508cc..bf9e58f2819 100644
--- a/mne/channels/channels.py
+++ b/mne/channels/channels.py
@@ -1382,7 +1382,7 @@ def read_ch_adjacency(fname, picks=None):
raise ValueError(
f"No built-in channel adjacency matrix found with name: "
f"{ch_adj_name}. Valid names are: "
- f'{", ".join(get_builtin_ch_adjacencies())}'
+ f"{', '.join(get_builtin_ch_adjacencies())}"
)
ch_adj = [a for a in _BUILTIN_CHANNEL_ADJACENCIES if a.name == ch_adj_name][0]
diff --git a/mne/channels/montage.py b/mne/channels/montage.py
index b22b9220e14..15cef38dec7 100644
--- a/mne/channels/montage.py
+++ b/mne/channels/montage.py
@@ -1287,7 +1287,7 @@ def _backcompat_value(pos, ref_pos):
f"Not setting position{_pl(extra)} of {len(extra)} {types} "
f"channel{_pl(extra)} found in montage:\n{names}\n"
"Consider setting the channel types to be of "
- f'{docdict["montage_types"]} '
+ f"{docdict['montage_types']} "
"using inst.set_channel_types before calling inst.set_montage, "
"or omit these channels when creating your montage."
)
diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py
index f51b551a1c8..bb886c51a96 100644
--- a/mne/channels/tests/test_channels.py
+++ b/mne/channels/tests/test_channels.py
@@ -404,8 +404,7 @@ def test_adjacency_matches_ft(tmp_path):
if hash_mne.hexdigest() != hash_ft.hexdigest():
raise ValueError(
- f"Hash mismatch between built-in and FieldTrip neighbors "
- f"for {fname}"
+ f"Hash mismatch between built-in and FieldTrip neighbors for {fname}"
)
diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py
index 8add1398409..d9306b5e1bd 100644
--- a/mne/channels/tests/test_montage.py
+++ b/mne/channels/tests/test_montage.py
@@ -420,12 +420,7 @@ def test_documented():
),
pytest.param(
partial(read_dig_hpts, unit="m"),
- (
- "eeg Fp1 -95.0 -3. -3.\n"
- "eeg AF7 -1 -1 -3\n"
- "eeg A3 -2 -2 2\n"
- "eeg A 0 0 0"
- ),
+ ("eeg Fp1 -95.0 -3. -3.\neeg AF7 -1 -1 -3\neeg A3 -2 -2 2\neeg A 0 0 0"),
make_dig_montage(
ch_pos={
"A": [0.0, 0.0, 0.0],
diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py
index 5b7d020b98d..894ede7fa1a 100644
--- a/mne/commands/mne_make_scalp_surfaces.py
+++ b/mne/commands/mne_make_scalp_surfaces.py
@@ -49,8 +49,7 @@ def run():
"--force",
dest="force",
action="store_true",
- help="Force creation of the surface even if it has "
- "some topological defects.",
+ help="Force creation of the surface even if it has some topological defects.",
)
parser.add_option(
"-t",
diff --git a/mne/commands/mne_setup_source_space.py b/mne/commands/mne_setup_source_space.py
index e536a59f90b..273e833b31c 100644
--- a/mne/commands/mne_setup_source_space.py
+++ b/mne/commands/mne_setup_source_space.py
@@ -62,8 +62,7 @@ def run():
parser.add_option(
"--ico",
dest="ico",
- help="use the recursively subdivided icosahedron "
- "to create the source space.",
+ help="use the recursively subdivided icosahedron to create the source space.",
default=None,
type="int",
)
diff --git a/mne/conftest.py b/mne/conftest.py
index 85e3f9d255b..8a4586067b3 100644
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -186,6 +186,7 @@ def pytest_configure(config: pytest.Config):
ignore:.*builtin type swigvarlink has no.*:DeprecationWarning
# eeglabio
ignore:numpy\.core\.records is deprecated.*:DeprecationWarning
+ ignore:Starting field name with a underscore.*:
# joblib
ignore:process .* is multi-threaded, use of fork/exec.*:DeprecationWarning
""" # noqa: E501
diff --git a/mne/coreg.py b/mne/coreg.py
index f28c6142c96..c7549ee028a 100644
--- a/mne/coreg.py
+++ b/mne/coreg.py
@@ -876,8 +876,7 @@ def _scale_params(subject_to, subject_from, scale, subjects_dir):
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if (subject_from is None) != (scale is None):
raise TypeError(
- "Need to provide either both subject_from and scale "
- "parameters, or neither."
+ "Need to provide either both subject_from and scale parameters, or neither."
)
if subject_from is None:
@@ -1402,8 +1401,7 @@ def _read_surface(filename, *, on_defects):
complete_surface_info(bem, copy=False)
except Exception:
raise ValueError(
- f"Error loading surface from {filename} (see "
- "Terminal for details)."
+ f"Error loading surface from {filename} (see Terminal for details)."
)
return bem
@@ -2145,8 +2143,7 @@ def omit_head_shape_points(self, distance):
mask = self._orig_hsp_point_distance <= distance
n_excluded = np.sum(~mask)
logger.info(
- "Coregistration: Excluding %i head shape points with "
- "distance >= %.3f m.",
+ "Coregistration: Excluding %i head shape points with distance >= %.3f m.",
n_excluded,
distance,
)
diff --git a/mne/cov.py b/mne/cov.py
index 19f70af2bd6..694c836d0cd 100644
--- a/mne/cov.py
+++ b/mne/cov.py
@@ -1293,7 +1293,7 @@ def _compute_covariance_auto(
data_ = data.copy()
name = method_.__name__ if callable(method_) else method_
logger.info(
- f'Estimating {cov_kind + (" " if cov_kind else "")}'
+ f"Estimating {cov_kind + (' ' if cov_kind else '')}"
f"covariance using {name.upper()}"
)
mp = method_params[method_]
@@ -1712,7 +1712,7 @@ def _get_ch_whitener(A, pca, ch_type, rank):
logger.info(
f" Setting small {ch_type} eigenvalues to zero "
- f'({"using" if pca else "without"} PCA)'
+ f"({'using' if pca else 'without'} PCA)"
)
if pca: # No PCA case.
# This line will reduce the actual number of variables in data
@@ -2400,7 +2400,7 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
data = tag.data
diag = True
logger.info(
- " %d x %d diagonal covariance (kind = " "%d) found.",
+ " %d x %d diagonal covariance (kind = %d) found.",
dim,
dim,
cov_kind,
@@ -2416,7 +2416,7 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
data.flat[:: dim + 1] /= 2.0
diag = False
logger.info(
- " %d x %d full covariance (kind = %d) " "found.",
+ " %d x %d full covariance (kind = %d) found.",
dim,
dim,
cov_kind,
@@ -2425,7 +2425,7 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
diag = False
data = tag.data
logger.info(
- " %d x %d sparse covariance (kind = %d)" " found.",
+ " %d x %d sparse covariance (kind = %d) found.",
dim,
dim,
cov_kind,
diff --git a/mne/datasets/_fetch.py b/mne/datasets/_fetch.py
index 1e38606f908..8f44459ad97 100644
--- a/mne/datasets/_fetch.py
+++ b/mne/datasets/_fetch.py
@@ -143,8 +143,7 @@ def fetch_dataset(
if auth is not None:
if len(auth) != 2:
raise RuntimeError(
- "auth should be a 2-tuple consisting "
- "of a username and password/token."
+ "auth should be a 2-tuple consisting of a username and password/token."
)
# processor to uncompress files
diff --git a/mne/datasets/config.py b/mne/datasets/config.py
index ccd4babacd9..75eff184cd1 100644
--- a/mne/datasets/config.py
+++ b/mne/datasets/config.py
@@ -92,8 +92,8 @@
phantom_kit="0.2",
ucl_opm_auditory="0.2",
)
-TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}'
-MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}'
+TESTING_VERSIONED = f"mne-testing-data-{RELEASES['testing']}"
+MISC_VERSIONED = f"mne-misc-data-{RELEASES['misc']}"
# To update any other dataset besides `testing` or `misc`, upload the new
# version of the data archive itself (e.g., to https://osf.io or wherever) and
@@ -118,7 +118,7 @@
hash="md5:d94fe9f3abe949a507eaeb865fb84a3f",
url=(
"https://codeload.github.com/mne-tools/mne-testing-data/"
- f'tar.gz/{RELEASES["testing"]}'
+ f"tar.gz/{RELEASES['testing']}"
),
# In case we ever have to resort to osf.io again...
# archive_name='mne-testing-data.tar.gz',
@@ -131,8 +131,7 @@
archive_name=f"{MISC_VERSIONED}.tar.gz", # 'mne-misc-data',
hash="md5:e343d3a00cb49f8a2f719d14f4758afe",
url=(
- "https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/"
- f'{RELEASES["misc"]}'
+ f"https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/{RELEASES['misc']}"
),
folder_name="MNE-misc-data",
config_key="MNE_DATASETS_MISC_PATH",
diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py
index c14282ed202..b5ea1764946 100644
--- a/mne/datasets/sleep_physionet/age.py
+++ b/mne/datasets/sleep_physionet/age.py
@@ -122,10 +122,7 @@ def fetch_data(
)
_on_missing(on_missing, msg)
if 13 in subjects and 2 in recording:
- msg = (
- "Requested recording 2 for subject 13, but it is not available "
- "in corpus."
- )
+ msg = "Requested recording 2 for subject 13, but it is not available in corpus."
_on_missing(on_missing, msg)
fnames = []
diff --git a/mne/epochs.py b/mne/epochs.py
index 04b1a288bfe..679643ab969 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -1671,8 +1671,7 @@ def _get_data(
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info["ch_names"]), len(self.times)))
msg = (
- f"for {n_events} events and {len(self._raw_times)} "
- "original time points"
+ f"for {n_events} events and {len(self._raw_times)} original time points"
)
if self._decim > 1:
msg += " (prior to decimation)"
@@ -2301,8 +2300,7 @@ def save(
logger.info(f"Splitting into {n_parts} parts")
if n_parts > 100: # This must be an error
raise ValueError(
- f"Split size {split_size} would result in writing "
- f"{n_parts} files"
+ f"Split size {split_size} would result in writing {n_parts} files"
)
if len(self.drop_log) > 100000:
@@ -3143,7 +3141,7 @@ def _ensure_list(x):
raise ValueError(
f"The event names in keep_first and keep_last must "
f"be mutually exclusive. Specified in both: "
- f'{", ".join(sorted(keep_first_and_last))}'
+ f"{', '.join(sorted(keep_first_and_last))}"
)
del keep_first_and_last
@@ -3163,7 +3161,7 @@ def _diff_input_strings_vs_event_id(input_strings, input_name, event_id):
if event_name_diff:
raise ValueError(
f"Present in {input_name}, but missing from event_id: "
- f'{", ".join(event_name_diff)}'
+ f"{', '.join(event_name_diff)}"
)
_diff_input_strings_vs_event_id(
@@ -3556,8 +3554,7 @@ def __init__(
if not isinstance(raw, BaseRaw):
raise ValueError(
- "The first argument to `Epochs` must be an "
- "instance of mne.io.BaseRaw"
+ "The first argument to `Epochs` must be an instance of mne.io.BaseRaw"
)
info = deepcopy(raw.info)
annotations = raw.annotations.copy()
@@ -4441,8 +4438,7 @@ def _get_epoch_from_raw(self, idx, verbose=None):
else:
# read the correct subset of the data
raise RuntimeError(
- "Correct epoch could not be found, please "
- "contact mne-python developers"
+ "Correct epoch could not be found, please contact mne-python developers"
)
# the following is equivalent to this, but faster:
#
diff --git a/mne/event.py b/mne/event.py
index 723615ea56a..a19270db1e6 100644
--- a/mne/event.py
+++ b/mne/event.py
@@ -1649,7 +1649,7 @@ def match_event_names(event_names, keys, *, on_missing="raise"):
_on_missing(
on_missing=on_missing,
msg=f'Event name "{key}" could not be found. The following events '
- f'are present in the data: {", ".join(event_names)}',
+ f"are present in the data: {', '.join(event_names)}",
error_klass=KeyError,
)
diff --git a/mne/evoked.py b/mne/evoked.py
index 5fb09db9d1b..c04f83531e3 100644
--- a/mne/evoked.py
+++ b/mne/evoked.py
@@ -962,7 +962,7 @@ def __neg__(self):
if out.comment is not None and " + " in out.comment:
out.comment = f"({out.comment})" # multiple conditions in evoked
- out.comment = f'- {out.comment or "unknown"}'
+ out.comment = f"- {out.comment or 'unknown'}"
return out
def get_peak(
@@ -1053,8 +1053,7 @@ def get_peak(
raise ValueError('Channel type must be "grad" for merge_grads')
elif mode == "neg":
raise ValueError(
- "Negative mode (mode=neg) does not make "
- "sense with merge_grads=True"
+ "Negative mode (mode=neg) does not make sense with merge_grads=True"
)
meg = eeg = misc = seeg = dbs = ecog = fnirs = False
@@ -1650,12 +1649,12 @@ def combine_evoked(all_evoked, weights):
if e.comment is not None and " + " in e.comment: # multiple conditions
this_comment = f"({e.comment})"
else:
- this_comment = f'{e.comment or "unknown"}'
+ this_comment = f"{e.comment or 'unknown'}"
# assemble everything
if idx == 0:
comment += f"{sign}{weight}{multiplier}{this_comment}"
else:
- comment += f' {sign or "+"} {weight}{multiplier}{this_comment}'
+ comment += f" {sign or '+'} {weight}{multiplier}{this_comment}"
# special-case: combine_evoked([e1, -e2], [1, -1])
evoked.comment = comment.replace(" - - ", " + ")
return evoked
@@ -1872,8 +1871,7 @@ def _read_evoked(fname, condition=None, kind="average", allow_maxshield=False):
if len(chs) != nchan:
raise ValueError(
- "Number of channels and number of "
- "channel definitions are different"
+ "Number of channels and number of channel definitions are different"
)
ch_names_mapping = _read_extended_ch_info(chs, my_evoked, fid)
diff --git a/mne/export/_egimff.py b/mne/export/_egimff.py
index 3792ea4a6a5..185afb5f558 100644
--- a/mne/export/_egimff.py
+++ b/mne/export/_egimff.py
@@ -53,7 +53,7 @@ def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, verbose=
info = evoked[0].info
if np.round(info["sfreq"]) != info["sfreq"]:
raise ValueError(
- f'Sampling frequency must be a whole number. sfreq: {info["sfreq"]}'
+ f"Sampling frequency must be a whole number. sfreq: {info['sfreq']}"
)
sampling_rate = int(info["sfreq"])
diff --git a/mne/export/_export.py b/mne/export/_export.py
index 490bf986895..6e63064bf7c 100644
--- a/mne/export/_export.py
+++ b/mne/export/_export.py
@@ -216,7 +216,6 @@ def _infer_check_export_fmt(fmt, fname, supported_formats):
supported_str = ", ".join(supported)
raise ValueError(
- f"Format '{fmt}' is not supported. "
- f"Supported formats are {supported_str}."
+ f"Format '{fmt}' is not supported. Supported formats are {supported_str}."
)
return fmt
diff --git a/mne/export/tests/test_export.py b/mne/export/tests/test_export.py
index ca0853837fc..191e91b1eed 100644
--- a/mne/export/tests/test_export.py
+++ b/mne/export/tests/test_export.py
@@ -235,7 +235,7 @@ def test_edf_padding(tmp_path, pad_width):
RuntimeWarning,
match=(
"EDF format requires equal-length data blocks.*"
- f"{pad_width/1000:.3g} seconds of edge values were appended.*"
+ f"{pad_width / 1000:.3g} seconds of edge values were appended.*"
),
):
raw.export(temp_fname)
@@ -580,7 +580,7 @@ def test_export_to_mff_incompatible_sfreq():
"""Test non-whole number sampling frequency throws ValueError."""
pytest.importorskip("mffpy", "0.5.7")
evoked = read_evokeds(fname_evoked)
- with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info["sfreq"]}'):
+ with pytest.raises(ValueError, match=f"sfreq: {evoked[0].info['sfreq']}"):
export_evokeds("output.mff", evoked)
diff --git a/mne/filter.py b/mne/filter.py
index ee5b34cd657..a7d7c883e2f 100644
--- a/mne/filter.py
+++ b/mne/filter.py
@@ -411,8 +411,7 @@ def _prep_for_filtering(x, copy, picks=None):
picks = np.tile(picks, n_epochs) + offset
elif len(orig_shape) > 3:
raise ValueError(
- "picks argument is not supported for data with more"
- " than three dimensions"
+ "picks argument is not supported for data with more than three dimensions"
)
assert all(0 <= pick < x.shape[0] for pick in picks) # guaranteed by above
@@ -2873,7 +2872,7 @@ def design_mne_c_filter(
h_width = (int(((n_freqs - 1) * h_trans_bandwidth) / (0.5 * sfreq)) + 1) // 2
h_start = int(((n_freqs - 1) * h_freq) / (0.5 * sfreq))
logger.info(
- "filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d " "hpw : %d lpw : %d",
+ "filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d hpw : %d lpw : %d",
l_freq,
h_freq,
l_start,
diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py
index b505b5e45df..e98a147b560 100644
--- a/mne/forward/_field_interpolation.py
+++ b/mne/forward/_field_interpolation.py
@@ -96,7 +96,7 @@ def _pinv_trunc(x, miss):
varexp /= varexp[-1]
n = np.where(varexp >= (1.0 - miss))[0][0] + 1
logger.info(
- " Truncating at %d/%d components to omit less than %g " "(%0.2g)",
+ " Truncating at %d/%d components to omit less than %g (%0.2g)",
n,
len(s),
miss,
@@ -111,8 +111,7 @@ def _pinv_tikhonov(x, reg):
# _reg_pinv requires square Hermitian, which we have here
inv, _, n = _reg_pinv(x, reg=reg, rank=None)
logger.info(
- f" Truncating at {n}/{len(x)} components and regularizing "
- f"with α={reg:0.1e}"
+ f" Truncating at {n}/{len(x)} components and regularizing with α={reg:0.1e}"
)
return inv, n
diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py
index 64aadf69fec..6c77f47e312 100644
--- a/mne/forward/_make_forward.py
+++ b/mne/forward/_make_forward.py
@@ -160,8 +160,7 @@ def _create_meg_coil(coilset, ch, acc, do_es):
break
else:
raise RuntimeError(
- "Desired coil definition not found "
- f"(type = {ch['coil_type']} acc = {acc})"
+ f"Desired coil definition not found (type = {ch['coil_type']} acc = {acc})"
)
# Apply a coordinate transformation if so desired
@@ -295,8 +294,8 @@ def _setup_bem(bem, bem_extra, neeg, mri_head_t, allow_none=False, verbose=None)
else:
if bem["surfs"][0]["coord_frame"] != FIFF.FIFFV_COORD_MRI:
raise RuntimeError(
- f'BEM is in {_coord_frame_name(bem["surfs"][0]["coord_frame"])} '
- 'coordinates, should be in MRI'
+ f"BEM is in {_coord_frame_name(bem['surfs'][0]['coord_frame'])} "
+ "coordinates, should be in MRI"
)
if neeg > 0 and len(bem["surfs"]) == 1:
raise RuntimeError(
@@ -335,7 +334,7 @@ def _prep_meg_channels(
del picks
# Get channel info and names for MEG channels
- logger.info(f'Read {len(info_meg["chs"])} MEG channels from info')
+ logger.info(f"Read {len(info_meg['chs'])} MEG channels from info")
# Get MEG compensation channels
compensator = post_picks = None
@@ -352,7 +351,7 @@ def _prep_meg_channels(
'channels. Consider using "ignore_ref=True" in '
"calculation"
)
- logger.info(f'{len(info["comps"])} compensation data sets in info')
+ logger.info(f"{len(info['comps'])} compensation data sets in info")
# Compose a compensation data set if necessary
# adapted from mne_make_ctf_comp() from mne_ctf_comp.c
logger.info("Setting up compensation data...")
diff --git a/mne/forward/forward.py b/mne/forward/forward.py
index e3e5c08d2f8..f1c2c2d11d7 100644
--- a/mne/forward/forward.py
+++ b/mne/forward/forward.py
@@ -512,7 +512,7 @@ def _merge_fwds(fwds, *, verbose=None):
a[k]["row_names"] = a[k]["row_names"] + b[k]["row_names"]
a["nchan"] = a["nchan"] + b["nchan"]
if len(fwds) > 1:
- logger.info(f' Forward solutions combined: {", ".join(combined)}')
+ logger.info(f" Forward solutions combined: {', '.join(combined)}")
return fwd
@@ -677,8 +677,7 @@ def read_forward_solution(fname, include=(), exclude=(), *, ordered=True, verbos
# Make sure forward solution is in either the MRI or HEAD coordinate frame
if fwd["coord_frame"] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):
raise ValueError(
- "Only forward solutions computed in MRI or head "
- "coordinates are acceptable"
+ "Only forward solutions computed in MRI or head coordinates are acceptable"
)
# Transform each source space to the HEAD or MRI coordinate frame,
@@ -1205,8 +1204,7 @@ def _triage_loose(src, loose, fixed="auto"):
if fixed is True:
if not all(v == 0.0 for v in loose.values()):
raise ValueError(
- 'When using fixed=True, loose must be 0. or "auto", '
- f"got {orig_loose}"
+ f'When using fixed=True, loose must be 0. or "auto", got {orig_loose}'
)
elif fixed is False:
if any(v == 0.0 for v in loose.values()):
@@ -1666,8 +1664,7 @@ def apply_forward(
for ch_name in fwd["sol"]["row_names"]:
if ch_name not in info["ch_names"]:
raise ValueError(
- f"Channel {ch_name} of forward operator not present in "
- "evoked_template."
+ f"Channel {ch_name} of forward operator not present in evoked_template."
)
# project the source estimate to the sensor space
diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py
index 98e3fbfc0b3..b365a2eed5a 100644
--- a/mne/gui/_coreg.py
+++ b/mne/gui/_coreg.py
@@ -1611,8 +1611,7 @@ def _configure_dock(self):
func=self._set_subjects_dir,
is_directory=True,
icon=True,
- tooltip="Load the path to the directory containing the "
- "FreeSurfer subjects",
+ tooltip="Load the path to the directory containing the FreeSurfer subjects",
layout=subjects_dir_layout,
)
self._renderer._layout_add_widget(
@@ -1741,8 +1740,7 @@ def _configure_dock(self):
self._widgets["omit"] = self._renderer._dock_add_button(
name="Omit",
callback=self._omit_hsp,
- tooltip="Exclude the head shape points that are far away from "
- "the MRI head",
+ tooltip="Exclude the head shape points that are far away from the MRI head",
layout=omit_hsp_layout_2,
)
self._widgets["reset_omit"] = self._renderer._dock_add_button(
diff --git a/mne/html_templates/_templates.py b/mne/html_templates/_templates.py
index 9427f2d6a25..1f68303a51e 100644
--- a/mne/html_templates/_templates.py
+++ b/mne/html_templates/_templates.py
@@ -66,7 +66,7 @@ def _format_time_range(inst) -> str:
def _format_projs(info) -> list[str]:
"""Format projectors."""
- projs = [f'{p["desc"]} ({"on" if p["active"] else "off"})' for p in info["projs"]]
+ projs = [f"{p['desc']} ({'on' if p['active'] else 'off'})" for p in info["projs"]]
return projs
diff --git a/mne/io/array/__init__.py b/mne/io/array/__init__.py
index aea21ef42ce..ad53f7c817f 100644
--- a/mne/io/array/__init__.py
+++ b/mne/io/array/__init__.py
@@ -4,4 +4,4 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-from .array import RawArray
+from ._array import RawArray
diff --git a/mne/io/array/array.py b/mne/io/array/_array.py
similarity index 100%
rename from mne/io/array/array.py
rename to mne/io/array/_array.py
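The leading-underscore rename marks the module private; the package `__init__.py` above re-exports `RawArray`, so public imports are unchanged. A quick check of the supported import path (a sketch; requires `mne` installed):

```python
# Sketch: the public entry points keep working after the rename.
import numpy as np
import mne

info = mne.create_info(ch_names=["EEG 001"], sfreq=100.0, ch_types="eeg")
raw = mne.io.RawArray(np.zeros((1, 100)), info)
```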
diff --git a/mne/io/artemis123/tests/test_artemis123.py b/mne/io/artemis123/tests/test_artemis123.py
index 039108eb915..610f32ba5da 100644
--- a/mne/io/artemis123/tests/test_artemis123.py
+++ b/mne/io/artemis123/tests/test_artemis123.py
@@ -35,9 +35,9 @@ def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.0):
angle = np.rad2deg(_angle_between_quats(quat_est, quat))
dist = np.linalg.norm(trans - trans_est)
- assert (
- dist <= dist_tol
- ), f"{1000 * dist:0.3f} > {1000 * dist_tol:0.3f} mm translation"
+ assert dist <= dist_tol, (
+ f"{1000 * dist:0.3f} > {1000 * dist_tol:0.3f} mm translation"
+ )
assert angle <= angle_tol, f"{angle:0.3f} > {angle_tol:0.3f}° rotation"
diff --git a/mne/io/base.py b/mne/io/base.py
index 4f5f2436bd7..280330367f7 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -1013,8 +1013,7 @@ def get_data(
if n_rejected > 0:
if reject_by_annotation == "omit":
msg = (
- "Omitting {} of {} ({:.2%}) samples, retaining {}"
- " ({:.2%}) samples."
+ "Omitting {} of {} ({:.2%}) samples, retaining {} ({:.2%}) samples."
)
logger.info(
msg.format(
@@ -2157,7 +2156,7 @@ def append(self, raws, preload=None):
for edge_samp in edge_samps:
onset = _sync_onset(self, edge_samp / self.info["sfreq"], True)
logger.debug(
- f"Marking edge at {edge_samp} samples " f"(maps to {onset:0.3f} sec)"
+ f"Marking edge at {edge_samp} samples (maps to {onset:0.3f} sec)"
)
self.annotations.append(onset, 0.0, "BAD boundary")
self.annotations.append(onset, 0.0, "EDGE boundary")
diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py
index 44a4e39adf6..971ac51c2f6 100644
--- a/mne/io/ctf/ctf.py
+++ b/mne/io/ctf/ctf.py
@@ -267,7 +267,7 @@ def _get_sample_info(fname, res4, system_clock):
fid.seek(offset, 0)
this_data = np.fromfile(fid, ">i4", res4["nsamp"])
if len(this_data) != res4["nsamp"]:
- raise RuntimeError(f"Cannot read data for trial {t+1}.")
+ raise RuntimeError(f"Cannot read data for trial {t + 1}.")
end = np.where(this_data == 0)[0]
if len(end) > 0:
n_samp = samp_offset + end[0]
diff --git a/mne/io/ctf/info.py b/mne/io/ctf/info.py
index 1b96d8bd88f..685a20792d3 100644
--- a/mne/io/ctf/info.py
+++ b/mne/io/ctf/info.py
@@ -50,8 +50,7 @@ def _pick_isotrak_and_hpi_coils(res4, coils, t):
if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
if t is None or t["t_ctf_dev_dev"] is None:
raise RuntimeError(
- "No coordinate transformation "
- "available for HPI coil locations"
+ "No coordinate transformation available for HPI coil locations"
)
d = dict(
kind=kind,
diff --git a/mne/io/ctf/tests/test_ctf.py b/mne/io/ctf/tests/test_ctf.py
index 4a5dd846655..448ea90baba 100644
--- a/mne/io/ctf/tests/test_ctf.py
+++ b/mne/io/ctf/tests/test_ctf.py
@@ -243,9 +243,9 @@ def test_read_ctf(tmp_path):
# Make sure all digitization points are in the MNE head coord frame
for p in raw.info["dig"]:
- assert (
- p["coord_frame"] == FIFF.FIFFV_COORD_HEAD
- ), "dig points must be in FIFF.FIFFV_COORD_HEAD"
+ assert p["coord_frame"] == FIFF.FIFFV_COORD_HEAD, (
+ "dig points must be in FIFF.FIFFV_COORD_HEAD"
+ )
if fname.endswith("catch-alp-good-f.ds"): # omit points from .pos file
with raw.info._unlock():
diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py
index bb79c46f24a..09ac24f753e 100644
--- a/mne/io/edf/edf.py
+++ b/mne/io/edf/edf.py
@@ -436,21 +436,24 @@ def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, cals,
ones[orig_idx, smp_read : smp_read + len(one_i)] = one_i
n_smp_read[orig_idx] += len(one_i)
+ # resample channels with lower sample frequency
# skip if no data was requested, i.e. only annotations were read
- if sum(n_smp_read) > 0:
+ if any(n_smp_read):
# expected number of samples, equals maximum sfreq
smp_exp = data.shape[-1]
- assert max(n_smp_read) == smp_exp
# resample data after loading all chunks to prevent edge artifacts
resampled = False
+
for i, smp_read in enumerate(n_smp_read):
# nothing read, nothing to resample
if smp_read == 0:
continue
# upsample if n_samples is lower than from highest sfreq
if smp_read != smp_exp:
- assert (ones[i, smp_read:] == 0).all() # sanity check
+ # sanity check that we read exactly how much we expected
+ assert (ones[i, smp_read:] == 0).all()
+
ones[i, :] = resample(
ones[i, :smp_read].astype(np.float64),
smp_exp,
@@ -628,7 +631,7 @@ def _get_info(
if len(chs_without_types):
msg = (
"Could not determine channel type of the following channels, "
- f'they will be set as EEG:\n{", ".join(chs_without_types)}'
+ f"they will be set as EEG:\n{', '.join(chs_without_types)}"
)
logger.info(msg)
@@ -712,8 +715,8 @@ def _get_info(
if info["highpass"] > info["lowpass"]:
warn(
- f'Highpass cutoff frequency {info["highpass"]} is greater '
- f'than lowpass cutoff frequency {info["lowpass"]}, '
+ f"Highpass cutoff frequency {info['highpass']} is greater "
+ f"than lowpass cutoff frequency {info['lowpass']}, "
"setting values to 0 and Nyquist."
)
info["highpass"] = 0.0
diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py
index b4f0ab33fa5..ce671ca7e81 100644
--- a/mne/io/edf/tests/test_edf.py
+++ b/mne/io/edf/tests/test_edf.py
@@ -259,6 +259,24 @@ def test_edf_different_sfreqs(stim_channel):
assert_allclose(times1, times2)
+@testing.requires_testing_data
+@pytest.mark.parametrize("stim_channel", (None, False, "auto"))
+def test_edf_different_sfreqs_nopreload(stim_channel):
+ """Test loading smaller sfreq channels without preloading."""
+ # load without preloading, then load a channel with a smaller sfreq than
+ # other channels; this used to produce an error, see mne-python/issues/12897
+
+ for i in range(1, 13):
+ raw = read_raw_edf(
+ input_fname=edf_reduced, stim_channel=stim_channel, verbose="error", preload=False
+ )
+
+ # this should work for channels of all sfreq, even if larger sfreqs
+ # are present in the file
+ x1 = raw.get_data(picks=[f"A{i}"], return_times=False)
+ # load next ch, this is sometimes with a higher sometimes a lower sfreq
+ x2 = raw.get_data([f"A{i + 1}"], return_times=False)
+ assert x1.shape == x2.shape
+
+
def test_edf_data_broken(tmp_path):
"""Test edf files."""
raw = _test_raw_reader(
diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py
index b2f08020e15..c3a10fb72cd 100644
--- a/mne/io/egi/egimff.py
+++ b/mne/io/egi/egimff.py
@@ -106,7 +106,7 @@ def _read_mff_header(filepath):
if bad:
raise RuntimeError(
"EGI epoch first/last samps could not be parsed:\n"
- f'{list(epochs["first_samps"])}\n{list(epochs["last_samps"])}'
+ f"{list(epochs['first_samps'])}\n{list(epochs['last_samps'])}"
)
summaryinfo.update(epochs)
# index which samples in raw are actually readable from disk (i.e., not
diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py
index 5d94d3e0a80..c8521722003 100644
--- a/mne/io/fieldtrip/fieldtrip.py
+++ b/mne/io/fieldtrip/fieldtrip.py
@@ -7,7 +7,7 @@
from ...epochs import EpochsArray
from ...evoked import EvokedArray
from ...utils import _check_fname, _import_pymatreader_funcs
-from ..array.array import RawArray
+from ..array._array import RawArray
from .utils import (
_create_event_metadata,
_create_events,
diff --git a/mne/io/fil/tests/test_fil.py b/mne/io/fil/tests/test_fil.py
index 06d3d924319..df15dd13353 100644
--- a/mne/io/fil/tests/test_fil.py
+++ b/mne/io/fil/tests/test_fil.py
@@ -87,9 +87,9 @@ def _fil_megmag(raw_test, raw_mat):
mat_list = raw_mat["label"]
mat_inds = _match_str(test_list, mat_list)
- assert len(mat_inds) == len(
- test_inds
- ), "Number of magnetometer channels in RAW does not match .mat file!"
+ assert len(mat_inds) == len(test_inds), (
+ "Number of magnetometer channels in RAW does not match .mat file!"
+ )
a = raw_test._data[test_inds, :]
b = raw_mat["trial"][mat_inds, :] * 1e-15 # fT to T
@@ -106,9 +106,9 @@ def _fil_stim(raw_test, raw_mat):
mat_list = raw_mat["label"]
mat_inds = _match_str(test_list, mat_list)
- assert len(mat_inds) == len(
- test_inds
- ), "Number of stim channels in RAW does not match .mat file!"
+ assert len(mat_inds) == len(test_inds), (
+ "Number of stim channels in RAW does not match .mat file!"
+ )
a = raw_test._data[test_inds, :]
b = raw_mat["trial"][mat_inds, :] # fT to T
@@ -122,9 +122,9 @@ def _fil_sensorpos(raw_test, raw_mat):
grad_list = raw_mat["coil_label"]
grad_inds = _match_str(test_list, grad_list)
- assert len(grad_inds) == len(
- test_inds
- ), "Number of channels with position data in RAW does not match .mat file!"
+ assert len(grad_inds) == len(test_inds), (
+ "Number of channels with position data in RAW does not match .mat file!"
+ )
mat_pos = raw_mat["coil_pos"][grad_inds, :]
mat_ori = raw_mat["coil_ori"][grad_inds, :]
diff --git a/mne/io/neuralynx/tests/test_neuralynx.py b/mne/io/neuralynx/tests/test_neuralynx.py
index ea5cdbccdfb..18578ef4ab7 100644
--- a/mne/io/neuralynx/tests/test_neuralynx.py
+++ b/mne/io/neuralynx/tests/test_neuralynx.py
@@ -143,9 +143,9 @@ def test_neuralynx():
assert raw.info["meas_date"] == meas_date_utc, "meas_date not set correctly"
# test that channel selection worked
- assert (
- raw.ch_names == expected_chan_names
- ), "labels in raw.ch_names don't match expected channel names"
+ assert raw.ch_names == expected_chan_names, (
+ "labels in raw.ch_names don't match expected channel names"
+ )
mne_y = raw.get_data() # in V
@@ -216,9 +216,9 @@ def test_neuralynx_gaps():
n_expected_gaps = 3
n_expected_missing_samples = 130
assert len(raw.annotations) == n_expected_gaps, "Wrong number of gaps detected"
- assert (
- (mne_y[0, :] == 0).sum() == n_expected_missing_samples
- ), "Number of true and inferred missing samples differ"
+ assert (mne_y[0, :] == 0).sum() == n_expected_missing_samples, (
+ "Number of true and inferred missing samples differ"
+ )
# read in .mat files containing original gaps
matchans = ["LAHC1_3_gaps.mat", "LAHC2_3_gaps.mat"]
diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py
index 53a812e7a21..5d9b79b57cc 100644
--- a/mne/io/nirx/nirx.py
+++ b/mne/io/nirx/nirx.py
@@ -210,7 +210,7 @@ def __init__(self, fname, saturated, *, preload=False, encoding=None, verbose=No
):
warn(
"Only import of data from NIRScout devices have been "
- f'thoroughly tested. You are using a {hdr["GeneralInfo"]["Device"]}'
+ f"thoroughly tested. You are using a {hdr['GeneralInfo']['Device']}"
" device."
)
diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py
index b559ce07068..8f773533ae4 100644
--- a/mne/io/tests/test_raw.py
+++ b/mne/io/tests/test_raw.py
@@ -533,7 +533,7 @@ def _test_raw_crop(reader, t_prop, kwargs):
n_samp = 50 # crop to this number of samples (per instance)
crop_t = n_samp / raw_1.info["sfreq"]
t_start = t_prop * crop_t # also crop to some fraction into the first inst
- extra = f' t_start={t_start}, preload={kwargs.get("preload", False)}'
+ extra = f" t_start={t_start}, preload={kwargs.get('preload', False)}"
stop = (n_samp - 1) / raw_1.info["sfreq"]
raw_1.crop(0, stop)
assert len(raw_1.times) == 50
diff --git a/mne/label.py b/mne/label.py
index f68144106c3..02bf9dc09c0 100644
--- a/mne/label.py
+++ b/mne/label.py
@@ -264,8 +264,7 @@ def __init__(
if not (len(vertices) == len(values) == len(pos)):
raise ValueError(
- "vertices, values and pos need to have same "
- "length (number of vertices)"
+ "vertices, values and pos need to have same length (number of vertices)"
)
# name
@@ -416,7 +415,7 @@ def __sub__(self, other):
else:
keep = np.arange(len(self.vertices))
- name = f'{self.name or "unnamed"} - {other.name or "unnamed"}'
+ name = f"{self.name or 'unnamed'} - {other.name or 'unnamed'}"
return Label(
self.vertices[keep],
self.pos[keep],
@@ -976,8 +975,7 @@ def _get_label_src(label, src):
src = _ensure_src(src)
if src.kind != "surface":
raise RuntimeError(
- "Cannot operate on SourceSpaces that are not "
- f"surface type, got {src.kind}"
+ f"Cannot operate on SourceSpaces that are not surface type, got {src.kind}"
)
if label.hemi == "lh":
hemi_src = src[0]
@@ -1585,8 +1583,7 @@ def stc_to_label(
vertno = np.where(src[hemi_idx]["inuse"])[0]
if not len(np.setdiff1d(this_vertno, vertno)) == 0:
raise RuntimeError(
- "stc contains vertices not present "
- "in source space, did you morph?"
+ "stc contains vertices not present in source space, did you morph?"
)
tmp = np.zeros((len(vertno), this_data.shape[1]))
this_vertno_idx = np.searchsorted(vertno, this_vertno)
@@ -2151,8 +2148,7 @@ def _read_annot(fname):
cands = _read_annot_cands(dir_name)
if len(cands) == 0:
raise OSError(
- f"No such file {fname}, no candidate parcellations "
- "found in directory"
+ f"No such file {fname}, no candidate parcellations found in directory"
)
else:
raise OSError(
diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py
index e5129a4822f..7c789503ac1 100644
--- a/mne/minimum_norm/inverse.py
+++ b/mne/minimum_norm/inverse.py
@@ -673,7 +673,7 @@ def prepare_inverse_operator(
inv["eigen_leads"]["data"] = sqrt(scale) * inv["eigen_leads"]["data"]
logger.info(
- " Scaled noise and source covariance from nave = %d to" " nave = %d",
+ " Scaled noise and source covariance from nave = %d to nave = %d",
inv["nave"],
nave,
)
@@ -2011,7 +2011,7 @@ def make_inverse_operator(
logger.info(
f" scaling factor to adjust the trace = {trace_GRGT:g} "
f"(nchan = {eigen_fields.shape[0]} "
- f'nzero = {(noise_cov["eig"] <= 0).sum()})'
+ f"nzero = {(noise_cov['eig'] <= 0).sum()})"
)
# MNE-ify everything for output
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
index aa3f8294027..5b5c941a9ac 100644
--- a/mne/minimum_norm/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -130,8 +130,7 @@ def _compare(a, b):
for k, v in a.items():
if k not in b and k not in skip_types:
raise ValueError(
- "First one had one second one didn't:\n"
- f"{k} not in {b.keys()}"
+ f"First one had one second one didn't:\n{k} not in {b.keys()}"
)
if k not in skip_types:
last_keys.pop()
diff --git a/mne/morph.py b/mne/morph.py
index 9c475bff1e9..a8278731f3c 100644
--- a/mne/morph.py
+++ b/mne/morph.py
@@ -200,8 +200,7 @@ def compute_source_morph(
if kind not in "surface" and xhemi:
raise ValueError(
- "Inter-hemispheric morphing can only be used "
- "with surface source estimates."
+ "Inter-hemispheric morphing can only be used with surface source estimates."
)
if sparse and kind != "surface":
raise ValueError("Only surface source estimates can compute a sparse morph.")
@@ -1301,8 +1300,7 @@ def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=None, verbose=No
if isinstance(grade, list):
if not len(grade) == 2:
raise ValueError(
- "grade as a list must have two elements "
- "(arrays of output vertices)"
+ "grade as a list must have two elements (arrays of output vertices)"
)
vertices = grade
else:
@@ -1385,8 +1383,7 @@ def _surf_upsampling_mat(idx_from, e, smooth):
smooth = _ensure_int(smooth, "smoothing steps")
if smooth <= 0: # == 0 is handled in a shortcut above
raise ValueError(
- "The number of smoothing operations has to be at least 0, got "
- f"{smooth}"
+ f"The number of smoothing operations has to be at least 0, got {smooth}"
)
smooth = smooth - 1
# idx will gradually expand from idx_from -> np.arange(n_tot)
diff --git a/mne/preprocessing/_fine_cal.py b/mne/preprocessing/_fine_cal.py
index b43983a87eb..06041cd7f8e 100644
--- a/mne/preprocessing/_fine_cal.py
+++ b/mne/preprocessing/_fine_cal.py
@@ -401,7 +401,7 @@ def _adjust_mag_normals(info, data, origin, ext_order, *, angle_limit, err_limit
good = not bool(reason)
assert np.allclose(np.linalg.norm(zs, axis=1), 1.0)
logger.info(f" Fit mismatch {first_err:0.2f}→{last_err:0.2f}%")
- logger.info(f' Data segment {"" if good else "un"}usable{reason}')
+ logger.info(f" Data segment {'' if good else 'un'}usable{reason}")
# Reformat zs and cals to be the n_mags (including bads)
assert zs.shape == (len(data), 3)
assert cals.shape == (len(data), 1)
diff --git a/mne/preprocessing/artifact_detection.py b/mne/preprocessing/artifact_detection.py
index 0a4c8b6a24d..8674d6e22b3 100644
--- a/mne/preprocessing/artifact_detection.py
+++ b/mne/preprocessing/artifact_detection.py
@@ -213,7 +213,7 @@ def annotate_movement(
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(
- "Omitting %5.1f%% (%3d segments): " "ω >= %5.1f°/s (max: %0.1f°/s)",
+ "Omitting %5.1f%% (%3d segments): ω >= %5.1f°/s (max: %0.1f°/s)",
bad_pct,
len(onsets),
rotation_velocity_limit,
@@ -233,7 +233,7 @@ def annotate_movement(
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(
- "Omitting %5.1f%% (%3d segments): " "v >= %5.4fm/s (max: %5.4fm/s)",
+ "Omitting %5.1f%% (%3d segments): v >= %5.4fm/s (max: %5.4fm/s)",
bad_pct,
len(onsets),
translation_velocity_limit,
@@ -286,7 +286,7 @@ def annotate_movement(
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(
- "Omitting %5.1f%% (%3d segments): " "disp >= %5.4fm (max: %5.4fm)",
+ "Omitting %5.1f%% (%3d segments): disp >= %5.4fm (max: %5.4fm)",
bad_pct,
len(onsets),
mean_distance_limit,
@@ -539,7 +539,7 @@ def annotate_break(
if ignore:
logger.info(
f"Ignoring annotations with descriptions starting "
- f'with: {", ".join(ignore)}'
+ f"with: {', '.join(ignore)}"
)
else:
annotations = annotations_from_events(
diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py
index 20e5481f89c..13b6f2ef672 100644
--- a/mne/preprocessing/eog.py
+++ b/mne/preprocessing/eog.py
@@ -213,12 +213,12 @@ def _get_eog_channel_index(ch_name, inst):
if not_found:
raise ValueError(
f"The specified EOG channel{_pl(not_found)} "
- f'cannot be found: {", ".join(not_found)}'
+ f"cannot be found: {', '.join(not_found)}"
)
eog_inds = pick_channels(inst.ch_names, include=ch_names)
- logger.info(f'Using EOG channel{_pl(ch_names)}: {", ".join(ch_names)}')
+ logger.info(f"Using EOG channel{_pl(ch_names)}: {', '.join(ch_names)}")
return eog_inds
diff --git a/mne/preprocessing/hfc.py b/mne/preprocessing/hfc.py
index f8a65510a9a..41bf6bbd232 100644
--- a/mne/preprocessing/hfc.py
+++ b/mne/preprocessing/hfc.py
@@ -68,8 +68,7 @@ def compute_proj_hfc(
n_chs = len(coils[5])
if n_chs != info["nchan"]:
raise ValueError(
- f'Only {n_chs}/{info["nchan"]} picks could be interpreted '
- "as MEG channels."
+ f"Only {n_chs}/{info['nchan']} picks could be interpreted as MEG channels."
)
S = _sss_basis(exp, coils)
del coils
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py
index 3ea11e0531e..f35fe24c1ee 100644
--- a/mne/preprocessing/ica.py
+++ b/mne/preprocessing/ica.py
@@ -560,7 +560,7 @@ def __repr__(self):
"""ICA fit information."""
infos = self._get_infos_for_repr()
- s = f'{infos.fit_on or "no"} decomposition, method: {infos.fit_method}'
+ s = f"{infos.fit_on or 'no'} decomposition, method: {infos.fit_method}"
if infos.fit_on is not None:
s += (
@@ -568,8 +568,8 @@ def __repr__(self):
f"{infos.fit_n_samples} samples), "
f"{infos.fit_n_components} ICA components "
f"({infos.fit_n_pca_components} PCA components available), "
- f'channel types: {", ".join(infos.ch_types)}, '
- f'{len(infos.excludes) or "no"} sources marked for exclusion'
+ f"channel types: {', '.join(infos.ch_types)}, "
+ f"{len(infos.excludes) or 'no'} sources marked for exclusion"
)
return f""
@@ -698,7 +698,7 @@ def fit(
warn(
f"The following parameters passed to ICA.fit() will be "
f"ignored, as they only affect raw data (and it appears "
- f'you passed epochs): {", ".join(ignored_params)}'
+ f"you passed epochs): {', '.join(ignored_params)}"
)
picks = _picks_to_idx(
@@ -875,7 +875,7 @@ def _do_proj(self, data, log_suffix=""):
logger.info(
f" Applying projection operator with {nproj} "
f"vector{_pl(nproj)}"
- f'{" " if log_suffix else ""}{log_suffix}'
+ f"{' ' if log_suffix else ''}{log_suffix}"
)
if self.noise_cov is None: # otherwise it's in pre_whitener_
data = proj @ data
@@ -1162,7 +1162,7 @@ def get_explained_variance_ratio(self, inst, *, components=None, ch_type=None):
raise ValueError(
f"You requested operation on the channel type "
f'"{ch_type}", but only the following channel types are '
- f'supported: {", ".join(allowed_ch_types)}'
+ f"supported: {', '.join(allowed_ch_types)}"
)
del ch_type
@@ -2393,8 +2393,7 @@ def _pick_sources(self, data, include, exclude, n_pca_components):
unmixing = np.dot(unmixing, pca_components)
logger.info(
- f" Projecting back using {_n_pca_comp} "
- f"PCA component{_pl(_n_pca_comp)}"
+ f" Projecting back using {_n_pca_comp} PCA component{_pl(_n_pca_comp)}"
)
mixing = np.eye(_n_pca_comp)
mixing[: self.n_components_, : self.n_components_] = self.mixing_matrix_
@@ -3368,8 +3367,7 @@ def corrmap(
is_subject = False
else:
raise ValueError(
- "`template` must be a length-2 tuple or an array the "
- "size of the ICA maps."
+ "`template` must be a length-2 tuple or an array the size of the ICA maps."
)
template_fig, labelled_ics = None, None
diff --git a/mne/preprocessing/ieeg/_volume.py b/mne/preprocessing/ieeg/_volume.py
index b4997b2e3f8..af2dcf4328b 100644
--- a/mne/preprocessing/ieeg/_volume.py
+++ b/mne/preprocessing/ieeg/_volume.py
@@ -109,7 +109,7 @@ def _warn_missing_chs(info, dig_image, after_warp=False):
if missing_ch:
warn(
f"Channel{_pl(missing_ch)} "
- f'{", ".join(repr(ch) for ch in missing_ch)} not assigned '
+ f"{', '.join(repr(ch) for ch in missing_ch)} not assigned "
"voxels " + (f" after applying {after_warp}" if after_warp else "")
)
diff --git a/mne/preprocessing/infomax_.py b/mne/preprocessing/infomax_.py
index f0722ce5267..b445ac7116c 100644
--- a/mne/preprocessing/infomax_.py
+++ b/mne/preprocessing/infomax_.py
@@ -320,8 +320,7 @@ def infomax(
if l_rate > min_l_rate:
if verbose:
logger.info(
- f"... lowering learning rate to {l_rate:g}"
- "\n... re-starting..."
+ f"... lowering learning rate to {l_rate:g}\n... re-starting..."
)
else:
raise ValueError(
diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py
index 789c8520f05..8c9c0a93957 100644
--- a/mne/preprocessing/maxwell.py
+++ b/mne/preprocessing/maxwell.py
@@ -507,7 +507,7 @@ def _prep_maxwell_filter(
extended_proj_.append(proj["data"]["data"][:, idx])
extended_proj = np.concatenate(extended_proj_)
logger.info(
- " Extending external SSS basis using %d projection " "vectors",
+ " Extending external SSS basis using %d projection vectors",
len(extended_proj),
)
@@ -566,8 +566,8 @@ def _prep_maxwell_filter(
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.0:
warn(
- f'Head position change is over 25 mm '
- f'({", ".join(f"{x:0.1f}" for x in diff)}) = {dist:0.1f} mm'
+ f"Head position change is over 25 mm "
+ f"({', '.join(f'{x:0.1f}' for x in diff)}) = {dist:0.1f} mm"
)
# Reconstruct raw file object with spatiotemporal processed data
@@ -2579,7 +2579,7 @@ def find_bad_channels_maxwell(
freq_loc = "below" if raw.info["lowpass"] < h_freq else "equal to"
msg = (
f"The input data has already been low-pass filtered with a "
- f'{raw.info["lowpass"]} Hz cutoff frequency, which is '
+ f"{raw.info['lowpass']} Hz cutoff frequency, which is "
f"{freq_loc} the requested cutoff of {h_freq} Hz. Not "
f"applying low-pass filter."
)
diff --git a/mne/preprocessing/nirs/_beer_lambert_law.py b/mne/preprocessing/nirs/_beer_lambert_law.py
index 92a2e55b9fb..c17cf31110c 100644
--- a/mne/preprocessing/nirs/_beer_lambert_law.py
+++ b/mne/preprocessing/nirs/_beer_lambert_law.py
@@ -76,7 +76,7 @@ def beer_lambert_law(raw, ppf=6.0):
for ki, kind in zip((ii, jj), ("hbo", "hbr")):
ch = raw.info["chs"][ki]
ch.update(coil_type=coil_dict[kind], unit=FIFF.FIFF_UNIT_MOL)
- new_name = f'{ch["ch_name"].split(" ")[0]} {kind}'
+ new_name = f"{ch['ch_name'].split(' ')[0]} {kind}"
rename[ch["ch_name"]] = new_name
raw.rename_channels(rename)
diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py
index f5e816258f8..002d4555ff8 100644
--- a/mne/preprocessing/tests/test_maxwell.py
+++ b/mne/preprocessing/tests/test_maxwell.py
@@ -980,9 +980,9 @@ def _assert_shielding(raw_sss, erm_power, min_factor, max_factor=np.inf, meg="ma
sss_power = raw_sss[picks][0].ravel()
sss_power = np.sqrt(np.sum(sss_power * sss_power))
factor = erm_power / sss_power
- assert (
- min_factor <= factor < max_factor
- ), f"Shielding factor not {min_factor:0.3f} <= {factor:0.3f} < {max_factor:0.3f}"
+ assert min_factor <= factor < max_factor, (
+ f"Shielding factor not {min_factor:0.3f} <= {factor:0.3f} < {max_factor:0.3f}"
+ )
@buggy_mkl_svd
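
The assert rewrite in _assert_shielding above recurs in the test files below: the formatter now
parenthesizes the assertion message rather than the condition, keeping the comparison itself on
one line. The two layouts are equivalent; a sketch with invented values:

    min_factor, factor, max_factor = 10.0, 12.3, 100.0

    # old layout: condition parenthesized and split across lines
    assert (
        min_factor <= factor < max_factor
    ), f"Shielding factor not {min_factor:0.3f} <= {factor:0.3f} < {max_factor:0.3f}"

    # new layout: condition on one line, message parenthesized
    assert min_factor <= factor < max_factor, (
        f"Shielding factor not {min_factor:0.3f} <= {factor:0.3f} < {max_factor:0.3f}"
    )
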
diff --git a/mne/preprocessing/xdawn.py b/mne/preprocessing/xdawn.py
index 0b1132761b1..606b49370df 100644
--- a/mne/preprocessing/xdawn.py
+++ b/mne/preprocessing/xdawn.py
@@ -198,8 +198,7 @@ def _fit_xdawn(
evals, evecs = linalg.eigh(evo_cov, signal_cov)
except np.linalg.LinAlgError as exp:
raise ValueError(
- "Could not compute eigenvalues, ensure "
- f"proper regularization ({exp})"
+ f"Could not compute eigenvalues, ensure proper regularization ({exp})"
)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
diff --git a/mne/report/report.py b/mne/report/report.py
index 732c1a5c8b3..852feebc638 100644
--- a/mne/report/report.py
+++ b/mne/report/report.py
@@ -324,7 +324,7 @@ def _check_tags(tags) -> tuple[str]:
raise TypeError(
f"All tags must be strings without spaces or special characters, "
f"but got the following instead: "
- f'{", ".join([str(tag) for tag in bad_tags])}'
+ f"{', '.join([str(tag) for tag in bad_tags])}"
)
# Check for invalid characters
@@ -338,7 +338,7 @@ def _check_tags(tags) -> tuple[str]:
if bad_tags:
raise ValueError(
f"The following tags contained invalid characters: "
- f'{", ".join(repr(tag) for tag in bad_tags)}'
+ f"{', '.join(repr(tag) for tag in bad_tags)}"
)
return tags
@@ -429,8 +429,7 @@ def _fig_to_img(
output = BytesIO()
dpi = fig.get_dpi()
logger.debug(
- f"Saving figure with dimension {fig.get_size_inches()} inches with "
- f"{dpi} dpi"
+ f"Saving figure with dimension {fig.get_size_inches()} inches with {dpi} dpi"
)
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
@@ -913,7 +912,7 @@ def __repr__(self):
if len(titles) > 0:
titles = [f" {t}" for t in titles] # indent
tr = max(len(s), 50) # trim to larger of opening str and 50
- titles = [f"{t[:tr - 2]} …" if len(t) > tr else t for t in titles]
+ titles = [f"{t[: tr - 2]} …" if len(t) > tr else t for t in titles]
# then trim to the max length of all of these
tr = max(len(title) for title in titles)
tr = max(tr, len(s))
@@ -2761,9 +2760,7 @@ def _init_render(self, verbose=None):
if inc_fname.endswith(".js"):
include.append(
-                f'<script type="text/javascript">\n'
-                f"{file_content}\n"
-                f"</script>"
+                f'<script type="text/javascript">\n{file_content}\n</script>'
)
elif inc_fname.endswith(".css"):
         include.append(f'<style type="text/css">\n{file_content}\n</style>')
@@ -3649,7 +3646,7 @@ def _add_evoked_joint(
)
)
- title = f'Time course ({_handle_default("titles")[ch_type]})'
+ title = f"Time course ({_handle_default('titles')[ch_type]})"
self._add_figure(
fig=fig,
title=title,
@@ -4121,7 +4118,7 @@ def _add_epochs(
assert "eeg" in ch_type
title_start = "ERP image"
- title = f'{title_start} ({_handle_default("titles")[ch_type]})'
+ title = f"{title_start} ({_handle_default('titles')[ch_type]})"
self._add_figure(
fig=fig,
diff --git a/mne/source_estimate.py b/mne/source_estimate.py
index 024d630535c..deeb3a43ede 100644
--- a/mne/source_estimate.py
+++ b/mne/source_estimate.py
@@ -1388,8 +1388,7 @@ def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
]
else:
raise ValueError(
- "copy must be True if transformed data has "
- "more than 2 dimensions"
+ "copy must be True if transformed data has more than 2 dimensions"
)
else:
# return new or overwritten stc
@@ -3633,7 +3632,7 @@ def _volume_labels(src, labels, mri_resolution):
]
nnz = sum(len(v) != 0 for v in vertices)
logger.info(
- "%d/%d atlas regions had at least one vertex " "in the source space",
+ "%d/%d atlas regions had at least one vertex in the source space",
nnz,
len(out_labels),
)
@@ -4006,7 +4005,7 @@ def stc_near_sensors(
min_dist = pdist(pos).min() * 1000
logger.info(
- f' Minimum {"projected " if project else ""}intra-sensor distance: '
+ f" Minimum {'projected ' if project else ''}intra-sensor distance: "
f"{min_dist:0.1f} mm"
)
@@ -4034,7 +4033,7 @@ def stc_near_sensors(
if len(missing):
warn(
f"Channel{_pl(missing)} missing in STC: "
- f'{", ".join(evoked.ch_names[mi] for mi in missing)}'
+ f"{', '.join(evoked.ch_names[mi] for mi in missing)}"
)
nz_data = w @ evoked.data
diff --git a/mne/source_space/_source_space.py b/mne/source_space/_source_space.py
index f5e8b76a1fa..d64989961cf 100644
--- a/mne/source_space/_source_space.py
+++ b/mne/source_space/_source_space.py
@@ -743,7 +743,7 @@ def export_volume(
# generate use warnings for clipping
if n_diff > 0:
warn(
- f'{n_diff} {src["type"]} vertices lay outside of volume '
+ f"{n_diff} {src['type']} vertices lay outside of volume "
f"space. Consider using a larger volume space."
)
# get surface id or use default value
@@ -1546,7 +1546,7 @@ def setup_source_space(
# pre-load ico/oct surf (once) for speed, if necessary
if stype not in ("spacing", "all"):
logger.info(
- f'Doing the {dict(ico="icosa", oct="octa")[stype]}hedral vertex picking...'
+ f"Doing the {dict(ico='icosa', oct='octa')[stype]}hedral vertex picking..."
)
for hemi, surf in zip(["lh", "rh"], surfs):
logger.info(f"Loading {surf}...")
@@ -2916,8 +2916,7 @@ def _get_vertex_map_nn(
raise RuntimeError(f"vertex {one} would be used multiple times.")
one = one[0]
logger.info(
- "Source space vertex moved from %d to %d because of "
- "double occupation.",
+ "Source space vertex moved from %d to %d because of double occupation.",
was,
one,
)
@@ -3167,8 +3166,7 @@ def _compare_source_spaces(src0, src1, mode="exact", nearest=True, dist_tol=1.5e
assert_array_equal(
s["vertno"],
np.where(s["inuse"])[0],
- f'src{ii}[{si}]["vertno"] != '
- f'np.where(src{ii}[{si}]["inuse"])[0]',
+ f'src{ii}[{si}]["vertno"] != np.where(src{ii}[{si}]["inuse"])[0]',
)
assert_equal(len(s0["vertno"]), len(s1["vertno"]))
agreement = np.mean(s0["inuse"] == s1["inuse"])
diff --git a/mne/surface.py b/mne/surface.py
index 21432e7edfd..9e24147a080 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -214,7 +214,7 @@ def get_meg_helmet_surf(info, trans=None, *, verbose=None):
]
)
logger.info(
- "Getting helmet for system %s (derived from %d MEG " "channel locations)",
+ "Getting helmet for system %s (derived from %d MEG channel locations)",
system,
len(rr),
)
@@ -733,7 +733,7 @@ def __init__(self, surf, *, mode="old", verbose=None):
else:
self._init_old()
logger.debug(
- f'Setting up {mode} interior check for {len(self.surf["rr"])} '
+ f"Setting up {mode} interior check for {len(self.surf['rr'])} "
f"points took {(time.time() - t0) * 1000:0.1f} ms"
)
@@ -761,8 +761,7 @@ def _init_pyvista(self):
def __call__(self, rr, n_jobs=None, verbose=None):
n_orig = len(rr)
logger.info(
- f"Checking surface interior status for "
- f'{n_orig} point{_pl(n_orig, " ")}...'
+ f"Checking surface interior status for {n_orig} point{_pl(n_orig, ' ')}..."
)
t0 = time.time()
if self.mode == "pyvista":
@@ -770,7 +769,7 @@ def __call__(self, rr, n_jobs=None, verbose=None):
else:
inside = self._call_old(rr, n_jobs)
n = inside.sum()
- logger.info(f' Total {n}/{n_orig} point{_pl(n, " ")} inside the surface')
+ logger.info(f" Total {n}/{n_orig} point{_pl(n, ' ')} inside the surface")
logger.info(f"Interior check completed in {(time.time() - t0) * 1000:0.1f} ms")
return inside
@@ -792,7 +791,7 @@ def _call_old(self, rr, n_jobs):
n = (in_mask).sum()
n_pad = str(n).rjust(prec)
logger.info(
- f' Found {n_pad}/{n_orig} point{_pl(n, " ")} '
+ f" Found {n_pad}/{n_orig} point{_pl(n, ' ')} "
f"inside an interior sphere of radius "
f"{1000 * self.inner_r:6.1f} mm"
)
@@ -801,7 +800,7 @@ def _call_old(self, rr, n_jobs):
n = (out_mask).sum()
n_pad = str(n).rjust(prec)
logger.info(
- f' Found {n_pad}/{n_orig} point{_pl(n, " ")} '
+ f" Found {n_pad}/{n_orig} point{_pl(n, ' ')} "
f"outside an exterior sphere of radius "
f"{1000 * self.outer_r:6.1f} mm"
)
@@ -818,7 +817,7 @@ def _call_old(self, rr, n_jobs):
n_pad = str(n).rjust(prec)
check_pad = str(len(del_outside)).rjust(prec)
logger.info(
- f' Found {n_pad}/{check_pad} point{_pl(n, " ")} outside using '
+ f" Found {n_pad}/{check_pad} point{_pl(n, ' ')} outside using "
"surface Qhull"
)
@@ -828,7 +827,7 @@ def _call_old(self, rr, n_jobs):
n_pad = str(n).rjust(prec)
check_pad = str(len(solid_outside)).rjust(prec)
logger.info(
- f' Found {n_pad}/{check_pad} point{_pl(n, " ")} outside using '
+ f" Found {n_pad}/{check_pad} point{_pl(n, ' ')} outside using "
"solid angles"
)
inside[idx[solid_outside]] = False
diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py
index 6b1356ae107..4d0db170e2a 100644
--- a/mne/tests/test_annotations.py
+++ b/mne/tests/test_annotations.py
@@ -1450,8 +1450,7 @@ def test_repr():
# long annotation repr (> 79 characters, will be shortened)
r = repr(Annotations(range(14), [0] * 14, list("abcdefghijklmn")))
assert r == (
-        "<Annotations | 14 segments: a (1), b (1), c (1), d (1), e (1), "
-        "f (1), g ...>"
+        "<Annotations | 14 segments: a (1), b (1), c (1), d (1), e (1), f (1), g ...>"
)
# empty Annotations
diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py
index e93d4031646..f230eaa4256 100644
--- a/mne/tests/test_dipole.py
+++ b/mne/tests/test_dipole.py
@@ -214,9 +214,9 @@ def test_dipole_fitting(tmp_path):
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data**2, axis=0))
resi_rms = np.sqrt(np.sum(residual.data**2, axis=0))
- assert (
- data_rms > resi_rms * 0.95
- ).all(), f"{(data_rms / resi_rms).min()} (factor: {0.95})"
+ assert (data_rms > resi_rms * 0.95).all(), (
+ f"{(data_rms / resi_rms).min()} (factor: {0.95})"
+ )
# Compare to original points
transform_surface_to(fwd["src"][0], "head", fwd["mri_head_t"])
diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py
index c94da5e5ab8..64f80f50b74 100644
--- a/mne/tests/test_docstring_parameters.py
+++ b/mne/tests/test_docstring_parameters.py
@@ -222,8 +222,7 @@ def test_tabs():
continue
source = inspect.getsource(mod)
assert "\t" not in source, (
- f'"{modname}" has tabs, please remove them '
- "or add it to the ignore list"
+ f'"{modname}" has tabs, please remove them or add it to the ignore list'
)
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
index 079a2b53ec9..aa11082238f 100644
--- a/mne/tests/test_epochs.py
+++ b/mne/tests/test_epochs.py
@@ -479,12 +479,12 @@ def test_average_movements():
def _assert_drop_log_types(drop_log):
__tracebackhide__ = True
assert isinstance(drop_log, tuple), "drop_log should be tuple"
- assert all(
- isinstance(log, tuple) for log in drop_log
- ), "drop_log[ii] should be tuple"
- assert all(
- isinstance(s, str) for log in drop_log for s in log
- ), "drop_log[ii][jj] should be str"
+ assert all(isinstance(log, tuple) for log in drop_log), (
+ "drop_log[ii] should be tuple"
+ )
+ assert all(isinstance(s, str) for log in drop_log for s in log), (
+ "drop_log[ii][jj] should be str"
+ )
def test_reject():
diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py
index e259ececbce..537f1930f45 100644
--- a/mne/tests/test_filter.py
+++ b/mne/tests/test_filter.py
@@ -90,9 +90,9 @@ def test_estimate_ringing():
(0.0001, (30000, 60000)),
): # 37993
n_ring = estimate_ringing_samples(butter(3, thresh, output=kind))
- assert (
- lims[0] <= n_ring <= lims[1]
- ), f"{kind} {thresh}: {lims[0]} <= {n_ring} <= {lims[1]}"
+ assert lims[0] <= n_ring <= lims[1], (
+ f"{kind} {thresh}: {lims[0]} <= {n_ring} <= {lims[1]}"
+ )
with pytest.warns(RuntimeWarning, match="properly estimate"):
assert estimate_ringing_samples(butter(4, 0.00001)) == 100000
diff --git a/mne/time_frequency/_stft.py b/mne/time_frequency/_stft.py
index 8fb80b43fcc..a6b6f23fff7 100644
--- a/mne/time_frequency/_stft.py
+++ b/mne/time_frequency/_stft.py
@@ -59,8 +59,7 @@ def stft(x, wsize, tstep=None, verbose=None):
if (wsize % tstep) or (tstep % 2):
raise ValueError(
- "The step size must be a multiple of 2 and a "
- "divider of the window length."
+ "The step size must be a multiple of 2 and a divider of the window length."
)
if tstep > wsize / 2:
diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py
index c858dd52e57..4ddaa0ac6a3 100644
--- a/mne/time_frequency/csd.py
+++ b/mne/time_frequency/csd.py
@@ -224,8 +224,7 @@ def sum(self, fmin=None, fmax=None):
"""
if self._is_sum:
raise RuntimeError(
- "This CSD matrix already represents a mean or "
- "sum across frequencies."
+ "This CSD matrix already represents a mean or sum across frequencies."
)
# Deal with the various ways in which fmin and fmax can be specified
@@ -1372,7 +1371,7 @@ def _execute_csd_function(
logger.info("[done]")
if ch_names is None:
- ch_names = [f"SERIES{i+1:03}" for i in range(n_channels)]
+ ch_names = [f"SERIES{i + 1:03}" for i in range(n_channels)]
return CrossSpectralDensity(
csds_mean,
diff --git a/mne/time_frequency/spectrum.py b/mne/time_frequency/spectrum.py
index b1de7f11c0f..03a57010061 100644
--- a/mne/time_frequency/spectrum.py
+++ b/mne/time_frequency/spectrum.py
@@ -311,7 +311,7 @@ def __init__(
if np.isfinite(fmax) and (fmax > self.sfreq / 2):
raise ValueError(
f"Requested fmax ({fmax} Hz) must not exceed ½ the sampling "
- f'frequency of the data ({0.5 * inst.info["sfreq"]} Hz).'
+ f"frequency of the data ({0.5 * inst.info['sfreq']} Hz)."
)
# method
self._inst_type = type(inst)
@@ -442,7 +442,7 @@ def _check_values(self):
if bad_value.any():
chs = np.array(self.ch_names)[bad_value].tolist()
s = _pl(bad_value.sum())
- warn(f'Zero value in spectrum for channel{s} {", ".join(chs)}', UserWarning)
+ warn(f"Zero value in spectrum for channel{s} {', '.join(chs)}", UserWarning)
def _returns_complex_tapers(self, **method_kw):
return self.method == "multitaper" and method_kw.get("output") == "complex"
@@ -1536,7 +1536,7 @@ def average(self, method="mean"):
state["nave"] = state["data"].shape[0]
state["data"] = method(state["data"])
state["dims"] = state["dims"][1:]
- state["data_type"] = f'Averaged {state["data_type"]}'
+ state["data_type"] = f"Averaged {state['data_type']}"
defaults = dict(
method=None,
fmin=None,
@@ -1689,12 +1689,12 @@ def combine_spectrum(all_spectrum, weights="nave"):
ch_names = spectrum.ch_names
for s_ in all_spectrum[1:]:
- assert (
- s_.ch_names == ch_names
- ), f"{spectrum} and {s_} do not contain the same channels"
- assert (
- np.max(np.abs(s_.freqs - spectrum.freqs)) < 1e-7
- ), f"{spectrum} and {s_} do not contain the same frequencies"
+ assert s_.ch_names == ch_names, (
+ f"{spectrum} and {s_} do not contain the same channels"
+ )
+ assert np.max(np.abs(s_.freqs - spectrum.freqs)) < 1e-7, (
+ f"{spectrum} and {s_} do not contain the same frequencies"
+ )
# use union of bad channels
bads = list(
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
index 470dbe3ccb7..6a510a09b5e 100644
--- a/mne/time_frequency/tfr.py
+++ b/mne/time_frequency/tfr.py
@@ -625,8 +625,7 @@ def _check_tfr_param(
freqs = np.asarray(freqs, dtype=float)
if freqs.ndim != 1:
raise ValueError(
- f"freqs must be of shape (n_freqs,), got {np.array(freqs.shape)} "
- "instead."
+ f"freqs must be of shape (n_freqs,), got {np.array(freqs.shape)} instead."
)
# Check sfreq
@@ -1211,8 +1210,8 @@ def __init__(
classname = "EpochsTFR"
# end TODO
raise ValueError(
- f'{classname} got unsupported parameter value{_pl(problem)} '
- f'{" and ".join(problem)}.'
+ f"{classname} got unsupported parameter value{_pl(problem)} "
+ f"{' and '.join(problem)}."
)
# check method
valid_methods = ["morlet", "multitaper"]
@@ -1539,7 +1538,7 @@ def _check_values(self, negative_ok=False):
s = _pl(negative_values.sum())
warn(
f"Negative value in time-frequency decomposition for channel{s} "
- f'{", ".join(chs)}',
+ f"{', '.join(chs)}",
UserWarning,
)
@@ -3961,12 +3960,12 @@ def combine_tfr(all_tfr, weights="nave"):
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
- assert (
- t_.ch_names == ch_names
- ), f"{tfr} and {t_} do not contain the same channels"
- assert (
- np.max(np.abs(t_.times - tfr.times)) < 1e-7
- ), f"{tfr} and {t_} do not contain the same time instants"
+ assert t_.ch_names == ch_names, (
+ f"{tfr} and {t_} do not contain the same channels"
+ )
+ assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, (
+ f"{tfr} and {t_} do not contain the same time instants"
+ )
# use union of bad channels
bads = list(set(tfr.info["bads"]).union(*(t_.info["bads"] for t_ in all_tfr[1:])))
@@ -4163,7 +4162,7 @@ def _read_multiple_tfrs(tfr_data, condition=None, *, verbose=None):
if len(out) == 0:
raise ValueError(
f'Cannot find condition "{condition}" in this file. '
- f'The file contains conditions {", ".join(keys)}'
+ f"The file contains conditions {', '.join(keys)}"
)
if len(out) == 1:
out = out[0]
diff --git a/mne/utils/_logging.py b/mne/utils/_logging.py
index 68963feaf61..f4d19655bbf 100644
--- a/mne/utils/_logging.py
+++ b/mne/utils/_logging.py
@@ -511,7 +511,7 @@ def _frame_info(n):
except KeyError: # in our verbose dec
pass
else:
- infos.append(f'{name.lstrip("mne.")}:{frame.f_lineno}')
+ infos.append(f"{name.lstrip('mne.')}:{frame.f_lineno}")
frame = frame.f_back
if frame is None:
break
diff --git a/mne/utils/check.py b/mne/utils/check.py
index 21360df9c83..085c51b6996 100644
--- a/mne/utils/check.py
+++ b/mne/utils/check.py
@@ -317,8 +317,7 @@ def _check_subject(
_validate_type(second, "str", "subject input")
if first is not None and first != second:
raise ValueError(
- f"{first_kind} ({repr(first)}) did not match "
- f"{second_kind} ({second})"
+ f"{first_kind} ({repr(first)}) did not match {second_kind} ({second})"
)
return second
elif first is not None:
@@ -1071,8 +1070,7 @@ def _check_sphere(sphere, info=None, sphere_units="m"):
del ch_pos["FPz"]
elif "Fpz" not in ch_pos and "Oz" in ch_pos:
logger.info(
- "Approximating Fpz location by mirroring Oz along "
- "the X and Y axes."
+ "Approximating Fpz location by mirroring Oz along the X and Y axes."
)
# This assumes Fpz and Oz have the same Z coordinate
ch_pos["Fpz"] = ch_pos["Oz"] * [-1, -1, 1]
@@ -1082,7 +1080,7 @@ def _check_sphere(sphere, info=None, sphere_units="m"):
msg = (
f'sphere="eeglab" requires digitization points of '
f"the following electrode locations in the data: "
- f'{", ".join(horizon_ch_names)}, but could not find: '
+ f"{', '.join(horizon_ch_names)}, but could not find: "
f"{ch_name}"
)
if ch_name == "Fpz":
@@ -1263,8 +1261,7 @@ def _to_rgb(*args, name="color", alpha=False):
except ValueError:
args = args[0] if len(args) == 1 else args
raise ValueError(
- f'Invalid RGB{"A" if alpha else ""} argument(s) for {name}: '
- f"{repr(args)}"
+ f"Invalid RGB{'A' if alpha else ''} argument(s) for {name}: {repr(args)}"
) from None
@@ -1288,5 +1285,5 @@ def _check_method_kwargs(func, kwargs, msg=None):
if msg is None:
msg = f'function "{func}"'
raise TypeError(
- f'Got unexpected keyword argument{s} {", ".join(invalid_kw)} for {msg}.'
+ f"Got unexpected keyword argument{s} {', '.join(invalid_kw)} for {msg}."
)
diff --git a/mne/utils/config.py b/mne/utils/config.py
index a817886c3f0..c28373fcb93 100644
--- a/mne/utils/config.py
+++ b/mne/utils/config.py
@@ -185,8 +185,7 @@ def set_memmap_min_size(memmap_min_size):
"triggers automated memory mapping, e.g., 1M or 0.5G"
),
"MNE_REPR_HTML": (
- "bool, represent some of our objects with rich HTML in a notebook "
- "environment"
+ "bool, represent some of our objects with rich HTML in a notebook environment"
),
"MNE_SKIP_NETWORK_TESTS": (
"bool, used in a test decorator (@requires_good_network) to skip "
@@ -203,8 +202,7 @@ def set_memmap_min_size(memmap_min_size):
),
"MNE_USE_CUDA": "bool, use GPU for filtering/resampling",
"MNE_USE_NUMBA": (
- "bool, use Numba just-in-time compiler for some of our intensive "
- "computations"
+ "bool, use Numba just-in-time compiler for some of our intensive computations"
),
"SUBJECTS_DIR": "path-like, directory of freesurfer MRI files for each subject",
}
@@ -583,9 +581,9 @@ def _get_numpy_libs():
for pool in pools:
if pool["internal_api"] in ("openblas", "mkl"):
return (
- f'{rename[pool["internal_api"]]} '
- f'{pool["version"]} with '
- f'{pool["num_threads"]} thread{_pl(pool["num_threads"])}'
+ f"{rename[pool['internal_api']]} "
+ f"{pool['version']} with "
+ f"{pool['num_threads']} thread{_pl(pool['num_threads'])}"
)
return bad_lib
@@ -874,7 +872,7 @@ def sys_info(
pre = "│ "
else:
pre = " | "
- out(f'\n{pre}{" " * ljust}{op.dirname(mod.__file__)}')
+ out(f"\n{pre}{' ' * ljust}{op.dirname(mod.__file__)}")
out("\n")
if not mne_version_good:
diff --git a/mne/utils/misc.py b/mne/utils/misc.py
index bb3e3ee5cab..343761aee24 100644
--- a/mne/utils/misc.py
+++ b/mne/utils/misc.py
@@ -379,7 +379,7 @@ def _assert_no_instances(cls, when=""):
check = False
if check:
if cls.__name__ == "Brain":
- ref.append(f'Brain._cleaned = {getattr(obj, "_cleaned", None)}')
+ ref.append(f"Brain._cleaned = {getattr(obj, '_cleaned', None)}")
rr = gc.get_referrers(obj)
count = 0
for r in rr:
diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py
index 247c0840858..778700c99a7 100644
--- a/mne/viz/_brain/_brain.py
+++ b/mne/viz/_brain/_brain.py
@@ -4072,28 +4072,28 @@ def _update_monotonic(lims, fmin, fmid, fmax):
if fmin is not None:
lims["fmin"] = fmin
if lims["fmax"] < fmin:
- logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmin}')
+ logger.debug(f" Bumping fmax = {lims['fmax']} to {fmin}")
lims["fmax"] = fmin
if lims["fmid"] < fmin:
- logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmin}')
+ logger.debug(f" Bumping fmid = {lims['fmid']} to {fmin}")
lims["fmid"] = fmin
assert lims["fmin"] <= lims["fmid"] <= lims["fmax"]
if fmid is not None:
lims["fmid"] = fmid
if lims["fmin"] > fmid:
- logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmid}')
+ logger.debug(f" Bumping fmin = {lims['fmin']} to {fmid}")
lims["fmin"] = fmid
if lims["fmax"] < fmid:
- logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmid}')
+ logger.debug(f" Bumping fmax = {lims['fmax']} to {fmid}")
lims["fmax"] = fmid
assert lims["fmin"] <= lims["fmid"] <= lims["fmax"]
if fmax is not None:
lims["fmax"] = fmax
if lims["fmin"] > fmax:
- logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmax}')
+ logger.debug(f" Bumping fmin = {lims['fmin']} to {fmax}")
lims["fmin"] = fmax
if lims["fmid"] > fmax:
- logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmax}')
+ logger.debug(f" Bumping fmid = {lims['fmid']} to {fmax}")
lims["fmid"] = fmax
assert lims["fmin"] <= lims["fmid"] <= lims["fmax"]
diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py
index fd2ff96579e..5d092c21713 100644
--- a/mne/viz/_brain/tests/test_brain.py
+++ b/mne/viz/_brain/tests/test_brain.py
@@ -867,9 +867,9 @@ def _assert_brain_range(brain, rng):
for key, mesh in layerer._overlays.items():
if key == "curv":
continue
- assert (
- mesh._rng == rng
- ), f"_layered_meshes[{repr(hemi)}][{repr(key)}]._rng != {rng}"
+ assert mesh._rng == rng, (
+ f"_layered_meshes[{repr(hemi)}][{repr(key)}]._rng != {rng}"
+ )
@testing.requires_testing_data
@@ -1237,9 +1237,9 @@ def test_brain_scraper(renderer_interactive_pyvistaqt, brain_gc, tmp_path):
w = img.shape[1]
w0 = size[0]
# On Linux+conda we get a width of 624, similar tweak in test_brain_init above
- assert np.isclose(w, w0, atol=30) or np.isclose(
- w, w0 * 2, atol=30
- ), f"w ∉ {{{w0}, {2 * w0}}}" # HiDPI
+ assert np.isclose(w, w0, atol=30) or np.isclose(w, w0 * 2, atol=30), (
+ f"w ∉ {{{w0}, {2 * w0}}}"
+ ) # HiDPI
@testing.requires_testing_data
diff --git a/mne/viz/_proj.py b/mne/viz/_proj.py
index 5d21afb0594..6e0cb9a4143 100644
--- a/mne/viz/_proj.py
+++ b/mne/viz/_proj.py
@@ -90,8 +90,7 @@ def plot_projs_joint(
missing = (~used.astype(bool)).sum()
if missing:
warn(
- f"{missing} projector{_pl(missing)} had no channel names "
- "present in epochs"
+ f"{missing} projector{_pl(missing)} had no channel names present in epochs"
)
del projs
ch_types = list(proj_by_type) # reduce to number we actually need
diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py
index c415d83e456..467f5cb15e7 100644
--- a/mne/viz/backends/_utils.py
+++ b/mne/viz/backends/_utils.py
@@ -317,8 +317,7 @@ def _qt_get_stylesheet(theme):
file = open(theme)
except OSError:
warn(
- "Requested theme file not found, will use light instead: "
- f"{repr(theme)}"
+ f"Requested theme file not found, will use light instead: {repr(theme)}"
)
else:
with file as fid:
diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py
index 10ec5459e02..b047de4ea32 100644
--- a/mne/viz/evoked.py
+++ b/mne/viz/evoked.py
@@ -27,6 +27,7 @@
_clean_names,
_is_numeric,
_pl,
+ _time_mask,
_to_rgb,
_validate_type,
fill_doc,
@@ -1988,10 +1989,18 @@ def plot_evoked_joint(
contours = topomap_args.get("contours", 6)
ch_type = ch_types.pop() # set should only contain one element
# Since the data has all the ch_types, we get the limits from the plot.
- vmin, vmax = ts_ax.get_ylim()
+ vmin, vmax = (None, None)
norm = ch_type == "grad"
vmin = 0 if norm else vmin
- vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
+ time_idx = [
+ np.where(
+ _time_mask(evoked.times, tmin=t, tmax=None, sfreq=evoked.info["sfreq"])
+ )[0][0]
+ for t in times_sec
+ ]
+ scalings = topomap_args["scalings"] if "scalings" in topomap_args else None
+ scaling = _handle_default("scalings", scalings)[ch_type]
+ vmin, vmax = _setup_vmin_vmax(evoked.data[:, time_idx] * scaling, vmin, vmax, norm)
if not isinstance(contours, list | np.ndarray):
locator, contours = _set_contour_locator(vmin, vmax, contours)
else:
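
The plot_evoked_joint hunk above changes how the topomap insets get their color limits: rather
than reusing the time-series axes' y-limits, it takes the evoked data at the plotted times and
scales it to plotting units before calling _setup_vmin_vmax. A rough self-contained sketch of
that selection (all arrays and values are invented; the real code uses _time_mask,
_handle_default, and _setup_vmin_vmax, and special-cases gradiometer norms):

    import numpy as np

    times = np.linspace(0.0, 0.5, 6)  # stand-in for evoked.times, in s
    sfreq = 10.0                      # stand-in for evoked.info["sfreq"]
    data = np.array(                  # stand-in for evoked.data, in V
        [[1.0, -3.0, 2.0, 0.5, -1.0, 4.0], [0.0, 2.0, -2.0, 1.0, 3.0, -4.0]]
    ) * 1e-6
    times_sec = [0.1, 0.3]            # the requested topomap times

    # first sample at/after each time, with half-sample tolerance -- roughly
    # what the _time_mask(...) expression in the hunk computes
    time_idx = [np.where(times >= t - 0.5 / sfreq)[0][0] for t in times_sec]

    scaling = 1e6  # V -> µV, like _handle_default("scalings")["eeg"]
    vmin = (data[:, time_idx] * scaling).min()  # -3.0 µV
    vmax = (data[:, time_idx] * scaling).max()  # 2.0 µV
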
diff --git a/mne/viz/misc.py b/mne/viz/misc.py
index ed2636d3961..c83a4dfe717 100644
--- a/mne/viz/misc.py
+++ b/mne/viz/misc.py
@@ -443,7 +443,7 @@ def _plot_mri_contours(
if src[0]["coord_frame"] != FIFF.FIFFV_COORD_MRI:
raise ValueError(
"Source space must be in MRI coordinates, got "
- f'{_frame_to_str[src[0]["coord_frame"]]}'
+ f"{_frame_to_str[src[0]['coord_frame']]}"
)
for src_ in src:
points = src_["rr"][src_["inuse"].astype(bool)]
@@ -708,8 +708,7 @@ def plot_bem(
src = read_source_spaces(src)
elif src is not None and not isinstance(src, SourceSpaces):
raise TypeError(
- "src needs to be None, path-like or SourceSpaces instance, "
- f"not {repr(src)}"
+ f"src needs to be None, path-like or SourceSpaces instance, not {repr(src)}"
)
if len(surfaces) == 0:
diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py
index 6f109b9490b..34022d59768 100644
--- a/mne/viz/tests/test_3d.py
+++ b/mne/viz/tests/test_3d.py
@@ -893,7 +893,7 @@ def test_plot_alignment_fnirs(renderer, tmp_path):
with catch_logging() as log:
fig = plot_alignment(info, **kwargs)
log = log.getvalue()
- assert f'fnirs_cw_amplitude: {info["nchan"]}' in log
+ assert f"fnirs_cw_amplitude: {info['nchan']}" in log
_assert_n_actors(fig, renderer, info["nchan"])
fig = plot_alignment(info, fnirs=["channels", "sources", "detectors"], **kwargs)
diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py
index d83698acbb1..bb180a3f299 100644
--- a/mne/viz/topomap.py
+++ b/mne/viz/topomap.py
@@ -910,8 +910,7 @@ def _get_pos_outlines(info, picks, sphere, to_sphere=True):
orig_sphere = sphere
sphere, clip_origin = _adjust_meg_sphere(sphere, info, ch_type)
logger.debug(
- "Generating pos outlines with sphere "
- f"{sphere} from {orig_sphere} for {ch_type}"
+ f"Generating pos outlines with sphere {sphere} from {orig_sphere} for {ch_type}"
)
pos = _find_topomap_coords(
info, picks, ignore_overlap=True, to_sphere=to_sphere, sphere=sphere
@@ -1262,7 +1261,7 @@ def _plot_topomap(
if len(data) != len(pos):
raise ValueError(
"Data and pos need to be of same length. Got data of "
- f"length {len(data)}, pos of length { len(pos)}"
+ f"length {len(data)}, pos of length {len(pos)}"
)
norm = min(data) >= 0
@@ -1409,8 +1408,7 @@ def _plot_ica_topomap(
sphere = _check_sphere(sphere, ica.info)
if not isinstance(axes, Axes):
raise ValueError(
- "axis has to be an instance of matplotlib Axes, "
- f"got {type(axes)} instead."
+ f"axis has to be an instance of matplotlib Axes, got {type(axes)} instead."
)
ch_type = _get_plot_ch_type(ica, ch_type, allow_ref_meg=ica.allow_ref_meg)
if ch_type == "ref_meg":
@@ -2116,6 +2114,22 @@ def plot_evoked_topomap(
     :ref:`gridspec <matplotlib:arranging_axes>` interface to adjust the colorbar
size yourself.
+ The defaults for ``contours`` and ``vlim`` are handled as follows:
+
+ * When neither ``vlim`` nor a list of ``contours`` is passed, MNE sets
+ ``vlim`` at ± the maximum absolute value of the data and then chooses
+ contours within those bounds.
+
+ * When ``vlim`` but not a list of ``contours`` is passed, MNE chooses
+ contours to be within the ``vlim``.
+
+ * When a list of ``contours`` but not ``vlim`` is passed, MNE chooses
+ ``vlim`` to encompass the ``contours`` and the maximum absolute value of the
+ data.
+
+ * When both a list of ``contours`` and ``vlim`` are passed, MNE uses them
+ as-is.
+
When ``time=="interactive"``, the figure will publish and subscribe to the
following UI events:
@@ -2191,8 +2205,7 @@ def plot_evoked_topomap(
space = 1 / (2.0 * evoked.info["sfreq"])
if max(times) > max(evoked.times) + space or min(times) < min(evoked.times) - space:
raise ValueError(
- f"Times should be between {evoked.times[0]:0.3} and "
- f"{evoked.times[-1]:0.3}."
+ f"Times should be between {evoked.times[0]:0.3} and {evoked.times[-1]:0.3}."
)
# create axes
want_axes = n_times + int(colorbar)
@@ -2299,11 +2312,17 @@ def plot_evoked_topomap(
_vlim = [
_setup_vmin_vmax(data[:, i], *vlim, norm=merge_channels) for i in range(n_times)
]
- _vlim = (np.min(_vlim), np.max(_vlim))
+ _vlim = [np.min(_vlim), np.max(_vlim)]
cmap = _setup_cmap(cmap, n_axes=n_times, norm=_vlim[0] >= 0)
# set up contours
if not isinstance(contours, list | np.ndarray):
_, contours = _set_contour_locator(*_vlim, contours)
+ else:
+ if vlim[0] is None and np.any(contours < _vlim[0]):
+ _vlim[0] = contours[0]
+ if vlim[1] is None and np.any(contours > _vlim[1]):
+ _vlim[1] = contours[-1]
+
# prepare for main loop over times
kwargs = dict(
sensors=sensors,
@@ -2791,8 +2810,7 @@ def plot_psds_topomap(
# convert legacy list-of-tuple input to a dict
bands = {band[-1]: band[:-1] for band in bands}
logger.info(
- "converting legacy list-of-tuples input to a dict for the "
- "`bands` parameter"
+ "converting legacy list-of-tuples input to a dict for the `bands` parameter"
)
# upconvert single freqs to band upper/lower edges as needed
bin_spacing = np.diff(freqs)[0]
@@ -3352,6 +3370,7 @@ def _set_contour_locator(vmin, vmax, contours):
# correct number of bins is equal to contours + 1.
locator = ticker.MaxNLocator(nbins=contours + 1)
contours = locator.tick_values(vmin, vmax)
+ contours = contours[1:-1]
return locator, contours
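
The two topomap.py behavior changes above implement the contour/vlim rules now documented in the
plot_evoked_topomap docstring: _set_contour_locator drops the outermost tick values so that no
contour is drawn at (or beyond) the color limits, and an explicit contour list can widen the
computed _vlim when the user left vlim unset. A sketch with invented numbers:

    import numpy as np
    from matplotlib import ticker

    # 1) trim the locator output: tick_values() may place ticks at or beyond
    # the requested limits (here exactly at -2.0 and 2.0), so drop the ends
    vmin, vmax, n_contours = -2.0, 2.0, 6
    locator = ticker.MaxNLocator(nbins=n_contours + 1)
    contours = locator.tick_values(vmin, vmax)
    contours = contours[1:-1]

    # 2) widen the color limits to cover a user-supplied contour list when
    # vlim was not given explicitly
    vlim = (None, None)            # what the user passed
    _vlim = [-1.0, 1.0]            # limits derived from the data
    user_contours = np.array([-1.5, 0.0, 1.5])
    if vlim[0] is None and np.any(user_contours < _vlim[0]):
        _vlim[0] = user_contours[0]
    if vlim[1] is None and np.any(user_contours > _vlim[1]):
        _vlim[1] = user_contours[-1]
    assert _vlim == [-1.5, 1.5]
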
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index 00458bf3908..a09da17de7d 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -2356,7 +2356,7 @@ def _gfp(data):
except KeyError:
raise ValueError(
f'"combine" must be None, a callable, or one of "{", ".join(valid)}"; '
- f'got {combine}'
+ f"got {combine}"
)
return combine
diff --git a/tools/dev/ensure_headers.py b/tools/dev/ensure_headers.py
index b5b425b5900..a4095d82b42 100644
--- a/tools/dev/ensure_headers.py
+++ b/tools/dev/ensure_headers.py
@@ -156,15 +156,15 @@ def _ensure_copyright(lines, path):
lines[insert] = COPYRIGHT_LINE
else:
lines.insert(insert, COPYRIGHT_LINE)
- assert (
- lines.count(COPYRIGHT_LINE) == 1
- ), f"{lines.count(COPYRIGHT_LINE)=} for {path=}"
+ assert lines.count(COPYRIGHT_LINE) == 1, (
+ f"{lines.count(COPYRIGHT_LINE)=} for {path=}"
+ )
def _ensure_blank(lines, path):
- assert (
- lines.count(COPYRIGHT_LINE) == 1
- ), f"{lines.count(COPYRIGHT_LINE)=} for {path=}"
+ assert lines.count(COPYRIGHT_LINE) == 1, (
+ f"{lines.count(COPYRIGHT_LINE)=} for {path=}"
+ )
insert = lines.index(COPYRIGHT_LINE) + 1
if lines[insert].strip(): # actually has content
lines.insert(insert, "")
diff --git a/tools/hooks/update_environment_file.py b/tools/hooks/update_environment_file.py
index f5e6bb335b0..0b5380a16b5 100755
--- a/tools/hooks/update_environment_file.py
+++ b/tools/hooks/update_environment_file.py
@@ -80,7 +80,7 @@ def split_dep(dep):
pip_section = pip_section if len(pip_deps) else ""
# prepare the env file
env = f"""\
-# THIS FILE IS AUTO-GENERATED BY {'/'.join(Path(__file__).parts[-3:])} AND WILL BE OVERWRITTEN
+# THIS FILE IS AUTO-GENERATED BY {"/".join(Path(__file__).parts[-3:])} AND WILL BE OVERWRITTEN
name: mne
channels:
- conda-forge
diff --git a/tutorials/forward/20_source_alignment.py b/tutorials/forward/20_source_alignment.py
index dd26f610907..c8cf981dce9 100644
--- a/tutorials/forward/20_source_alignment.py
+++ b/tutorials/forward/20_source_alignment.py
@@ -115,11 +115,11 @@
mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0.0, 0.0, 0.0))
print(
"Distance from head origin to MEG origin: "
- f"{1000 * np.linalg.norm(raw.info["dev_head_t"]["trans"][:3, 3]):.1f} mm"
+ f"{1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3]):.1f} mm"
)
print(
"Distance from head origin to MRI origin: "
- f"{1000 * np.linalg.norm(trans["trans"][:3, 3]):.1f} mm"
+ f"{1000 * np.linalg.norm(trans['trans'][:3, 3]):.1f} mm"
)
dists = mne.dig_mri_distances(raw.info, trans, "sample", subjects_dir=subjects_dir)
print(
diff --git a/tutorials/forward/30_forward.py b/tutorials/forward/30_forward.py
index 6c55d0bfe3c..72731982962 100644
--- a/tutorials/forward/30_forward.py
+++ b/tutorials/forward/30_forward.py
@@ -255,7 +255,7 @@
# or ``inv['src']`` so that this removal is adequately accounted for.
print(f"Before: {src}")
-print(f'After: {fwd["src"]}')
+print(f"After: {fwd['src']}")
# %%
# We can explore the content of ``fwd`` to access the numpy array that contains
diff --git a/tutorials/intro/15_inplace.py b/tutorials/intro/15_inplace.py
index 0c68843d4c8..01e8c1f7eb0 100644
--- a/tutorials/intro/15_inplace.py
+++ b/tutorials/intro/15_inplace.py
@@ -60,9 +60,9 @@
# Another group of methods where data is modified in-place are the
# channel-picking methods. For example:
-print(f'original data had {original_raw.info["nchan"]} channels.')
+print(f"original data had {original_raw.info['nchan']} channels.")
original_raw.pick("eeg") # selects only the EEG channels
-print(f'after picking, it has {original_raw.info["nchan"]} channels.')
+print(f"after picking, it has {original_raw.info['nchan']} channels.")
# %%
diff --git a/tutorials/preprocessing/40_artifact_correction_ica.py b/tutorials/preprocessing/40_artifact_correction_ica.py
index 5eeb7b79d64..257b1f85051 100644
--- a/tutorials/preprocessing/40_artifact_correction_ica.py
+++ b/tutorials/preprocessing/40_artifact_correction_ica.py
@@ -291,8 +291,7 @@
# This time, print as percentage.
ratio_percent = round(100 * explained_var_ratio["eeg"])
print(
- f"Fraction of variance in EEG signal explained by first component: "
- f"{ratio_percent}%"
+ f"Fraction of variance in EEG signal explained by first component: {ratio_percent}%"
)
# %%
diff --git a/tutorials/preprocessing/50_artifact_correction_ssp.py b/tutorials/preprocessing/50_artifact_correction_ssp.py
index 57be25803d5..530e6fd39d8 100644
--- a/tutorials/preprocessing/50_artifact_correction_ssp.py
+++ b/tutorials/preprocessing/50_artifact_correction_ssp.py
@@ -520,7 +520,7 @@
evoked_eeg.plot(proj=proj, axes=ax, spatial_colors=True)
parts = ax.get_title().split("(")
ylabel = (
- f'{parts[0]} ({ax.get_ylabel()})\n{parts[1].replace(")", "")}'
+ f"{parts[0]} ({ax.get_ylabel()})\n{parts[1].replace(')', '')}"
if pi == 0
else ""
)