diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py
index ad5457439f..50a6603d24 100644
--- a/src/spikeinterface/sorters/internal/spyking_circus2.py
+++ b/src/spikeinterface/sorters/internal/spyking_circus2.py
@@ -75,8 +75,9 @@ class Spykingcircus2Sorter(ComponentsBasedSorter):
         "matched_filtering": "Boolean to specify whether circus 2 should detect peaks via matched filtering (slightly slower)",
         "cache_preprocessing": "How to cache the preprocessed recording. Mode can be memory, file, zarr, with extra arguments. In case of memory (default), \
             memory_limit will control how much RAM can be used. In case of folder or zarr, delete_cache controls if cache is cleaned after sorting",
-        "chunk_preprocessing": "How much RAM (approximately) should be devoted to load data chunks. memory_limit will control how much RAM can be used\
-            as a fraction of available memory. Otherwise, use total_memory to fix a hard limit",
+        "chunk_preprocessing": "How much RAM (approximately) should be devoted to load all data chunks (given n_jobs).\
+            memory_limit will control how much RAM can be used as a fraction of available memory. Otherwise, use total_memory to fix a hard limit, with\
+            a string syntax (e.g. '1G', '500M')",
         "multi_units_only": "Boolean to get only multi units activity (i.e. one template per electrode)",
         "job_kwargs": "A dictionary to specify how many jobs and which parameters they should used",
         "seed": "An int to control how chunks are shuffled while detecting peaks",
diff --git a/src/spikeinterface/sortingcomponents/tools.py b/src/spikeinterface/sortingcomponents/tools.py
index e55af1edfb..56b128d028 100644
--- a/src/spikeinterface/sortingcomponents/tools.py
+++ b/src/spikeinterface/sortingcomponents/tools.py
@@ -283,7 +283,9 @@ def set_optimal_chunk_size(recording, job_kwargs, memory_limit=0.5, total_memory
             chunk_duration = chunk_size / recording.get_sampling_frequency()
             job_kwargs = fix_job_kwargs(dict(chunk_duration=f"{chunk_duration}s"))
         else:
-            print("psutil is required to use only a fraction of available memory")
+            import warnings
+
+            warnings.warn("psutil is required to use only a fraction of available memory")
     else:
         from spikeinterface.core.job_tools import convert_string_to_bytes
 
@@ -305,7 +307,7 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
 
     recording: Recording
         The recording object
-    ram_requested: dict
+    ram_requested: int
         The amount of RAM (in bytes) requested for the job
     memory_limit: float
         The memory limit in fraction of available memory
@@ -324,7 +326,9 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
         n_jobs = int(min(n_jobs, memory_usage // ram_requested))
         job_kwargs.update(dict(n_jobs=n_jobs))
     else:
-        print("psutil is required to use only a fraction of available memory")
+        import warnings
+
+        warnings.warn("psutil is required to use only a fraction of available memory")
 
     return job_kwargs
 
@@ -367,14 +371,20 @@ def cache_preprocessing(
                 if recording.get_total_memory_size() < memory_usage:
                     recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
                 else:
-                    print("Recording too large to be preloaded in RAM...")
+                    import warnings
+
+                    warnings.warn("Recording too large to be preloaded in RAM...")
             else:
-                print("psutil is required to preload in memory given only a fraction of available memory")
+                import warnings
+
+                warnings.warn("psutil is required to preload in memory given only a fraction of available memory")
         else:
             if recording.get_total_memory_size() < total_memory:
                 recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
             else:
-                print("Recording too large to be preloaded in RAM...")
+                import warnings
+
+                warnings.warn("Recording too large to be preloaded in RAM...")
     elif mode == "folder":
         recording = recording.save_to_folder(**extra_kwargs)
     elif mode == "zarr":
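
Why the print -> warnings.warn change in sortingcomponents/tools.py matters: warnings can be
filtered, captured, or promoted to errors by callers, which plain print cannot. A minimal sketch,
using the get_optimal_n_jobs signature from the diff; the warning only fires on machines without
psutil, so on most setups the captured list is simply empty.

    import warnings

    from spikeinterface.sortingcomponents.tools import get_optimal_n_jobs

    # Capture (rather than lose to stdout) the "psutil is required..." message.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        job_kwargs = get_optimal_n_jobs(dict(n_jobs=8), ram_requested=2 * 1024**3)

    for w in caught:
        print(f"captured: {w.message}")  # non-empty only when psutil is missing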
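The updated chunk_preprocessing docstring points users at the '1G' / '500M' string syntax for
total_memory. A short sketch of that syntax, assuming the convert_string_to_bytes helper imported
in the diff's context lines behaves as its name suggests:

    from spikeinterface.core.job_tools import convert_string_to_bytes

    # Hard memory caps are given as strings and parsed into a byte count.
    for spec in ("1G", "500M"):
        print(spec, "->", convert_string_to_bytes(spec), "bytes")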
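For context, a sketch of how the clarified chunk_preprocessing parameter might be passed at the
sorter level. The parameter names come from the docstrings in the diff; the toy recording and the
exact run_sorter call are illustrative assumptions (SpyKING CIRCUS 2 has additional dependencies
not shown here), not a verified invocation.

    from spikeinterface.core import generate_recording
    from spikeinterface.sorters import run_sorter

    recording = generate_recording(durations=[10.0])  # toy recording for illustration

    sorting = run_sorter(
        "spykingcircus2",
        recording,
        chunk_preprocessing={"total_memory": "1G"},  # hard cap via the string syntax
        cache_preprocessing={"mode": "memory", "memory_limit": 0.5},
    )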