diff --git a/nwb_conversion_tools/datainterfaces/behavior/movie/movie_utils.py b/nwb_conversion_tools/datainterfaces/behavior/movie/movie_utils.py
index d2b085ead..2e2238596 100644
--- a/nwb_conversion_tools/datainterfaces/behavior/movie/movie_utils.py
+++ b/nwb_conversion_tools/datainterfaces/behavior/movie/movie_utils.py
@@ -141,49 +141,3 @@ def __exit__(self, *args):

     def __del__(self):
         self.vc.release()
-
-
-def get_movie_timestamps(movie_file: PathType):
-    """Return numpy array of the timestamps for a movie file.
-
-    Parameters
-    ----------
-    movie_file : PathType
-    """
-    cap = cv2.VideoCapture(str(movie_file))
-    timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
-    success, frame = cap.read()
-    while success:
-        timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC))
-        success, frame = cap.read()
-    cap.release()
-    return np.array(timestamps)
-
-
-def get_movie_fps(movie_file: PathType):
-    """Return the internal frames per second (fps) for a movie file.
-
-    Parameters
-    ----------
-    movie_file : PathType
-    """
-    cap = cv2.VideoCapture(str(movie_file))
-    if int((cv2.__version__).split(".")[0]) < 3:
-        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
-    else:
-        fps = cap.get(cv2.CAP_PROP_FPS)
-    cap.release()
-    return fps
-
-
-def get_frame_shape(movie_file: PathType):
-    """Return the shape of frames from a movie file.
-
-    Parameters
-    ----------
-    movie_file : PathType
-    """
-    cap = cv2.VideoCapture(str(movie_file))
-    success, frame = cap.read()
-    cap.release()
-    return frame.shape
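The module-level helpers removed above are superseded by the VideoCaptureContext class that the interface diff below imports from movie_utils. A minimal sketch of the replacement pattern, using only method names that actually appear in the diffs here (the movie file name is a placeholder):

    from nwb_conversion_tools.datainterfaces.behavior.movie.movie_utils import VideoCaptureContext

    # The context manager owns the cv2.VideoCapture handle, replacing the
    # manual cap = cv2.VideoCapture(...) / cap.release() pattern of the
    # removed helpers; release() is handled by __exit__/__del__ above.
    with VideoCaptureContext("session1.avi") as vc:
        fps = vc.get_movie_fps()                # was get_movie_fps(movie_file)
        timestamps = vc.get_movie_timestamps()  # was get_movie_timestamps(movie_file)
        frame_shape = vc.get_frame_shape()      # was get_frame_shape(movie_file)
        n_frames = vc.get_movie_frame_count()   # used below for maxshape and stub_test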
""" - assert HAVE_OPENCV, INSTALL_MESSAGE super().__init__(file_paths=file_paths) def get_metadata_schema(self): @@ -84,6 +74,8 @@ def run_conversion( chunk_data: bool = True, module_name: Optional[str] = None, module_description: Optional[str] = None, + compression: Optional[str] = "gzip", + compression_options: Optional[int] = None, ): """ Convert the movie data files to ImageSeries and write them in the NWBFile. @@ -106,6 +98,8 @@ def run_conversion( and may contain most keywords normally accepted by an ImageSeries (https://pynwb.readthedocs.io/en/stable/pynwb.image.html#pynwb.image.ImageSeries). The list for the 'Movies' key should correspond one to one to the movie files in the file_paths list. + If multiple movies need to be in the same ImageSeries, then supply the same value for "name" key. + Storing multiple movies in the same ImageSeries is only supported if 'external_mode'=True. stub_test : bool, optional If True, truncates the write operation for fast testing. The default is False. external_mode : bool, optional @@ -128,21 +122,19 @@ def run_conversion( module_description: str, optional If the processing module specified by module_name does not exist, it will be created with this description. The default description is the same as used by the conversion_tools.get_module function. + compression: str, optional + Compression strategy to use for HFDataIO. For full list of currently supported filters, see + https://docs.h5py.org/en/latest/high/dataset.html#lossless-compression-filters + compression_options: int, optional + Parameter(s) for compression filter. Currently only supports the compression level (integer from 0 to 9) of + compression="gzip". """ file_paths = self.source_data["file_paths"] - if stub_test: - count_max = 10 - else: - count_max = np.inf if starting_times is not None: - assert ( - isinstance(starting_times, list) - and all([isinstance(x, float) for x in starting_times]) - and len(starting_times) == len(file_paths) - ), "Argument 'starting_times' must be a list of floats in one-to-one correspondence with 'file_paths'!" - else: - starting_times = [0.0] + assert isinstance(starting_times, list) and all( + [isinstance(x, float) for x in starting_times] + ), "Argument 'starting_times' must be a list of floats." image_series_kwargs_list = metadata.get("Behavior", dict()).get( "Movies", self.get_metadata()["Behavior"]["Movies"] @@ -152,88 +144,120 @@ def run_conversion( f"({len(image_series_kwargs_list)}) vs. file_paths ({len(self.source_data['file_paths'])})!" ) - for j, file in enumerate(file_paths): - timestamps = starting_times[j] + get_movie_timestamps(movie_file=file) - - if len(starting_times) != len(file_paths): - starting_times.append(timestamps[-1]) + def _check_duplicates(image_series_kwargs_list): + image_series_kwargs_list_keys = [i["name"] for i in image_series_kwargs_list] + if len(set(image_series_kwargs_list_keys)) < len(image_series_kwargs_list_keys): + assert external_mode, "For multiple video files under the same ImageSeries name, use exernal_mode=True." 
+            keys_set = []
+            image_series_kwargs_list_unique = []
+            file_paths_list = []
+            for n, image_series_kwargs in enumerate(image_series_kwargs_list):
+                if image_series_kwargs["name"] not in keys_set:
+                    keys_set.append(image_series_kwargs["name"])
+                    file_paths_list.append([file_paths[n]])
+                    image_series_kwargs_list_unique.append(dict(image_series_kwargs))
+                else:
+                    idx = keys_set.index(image_series_kwargs["name"])
+                    file_paths_list[idx].append(file_paths[n])
+            return image_series_kwargs_list_unique, file_paths_list

-            image_series_kwargs = dict(image_series_kwargs_list[j])
-            if check_regular_timestamps(ts=timestamps):
-                fps = get_movie_fps(movie_file=file)
-                image_series_kwargs.update(starting_time=starting_times[j], rate=fps)
+        image_series_kwargs_list_updated, file_paths_list = _check_duplicates(image_series_kwargs_list)
+        if starting_times is not None:
+            assert len(starting_times) == len(image_series_kwargs_list_updated), (
+                "The number of starting_times must equal the number of unique ImageSeries containers to write to "
+                "the NWBFile."
+            )
+        else:
+            if len(image_series_kwargs_list_updated) == 1:
+                warn("starting_times not provided; defaulting to 0.0")
+                starting_times = [0.0]
             else:
-                image_series_kwargs.update(timestamps=H5DataIO(timestamps, compression="gzip"))
+                raise ValueError(f"Provide starting_times as a list of length {len(image_series_kwargs_list_updated)}.")

+        for j, (image_series_kwargs, file_list) in enumerate(zip(image_series_kwargs_list_updated, file_paths_list)):
             if external_mode:
-                image_series_kwargs.update(format="external", external_file=[file])
+                with VideoCaptureContext(str(file_list[0])) as vc:
+                    fps = vc.get_movie_fps()
+                image_series_kwargs.update(
+                    starting_time=starting_times[j], rate=fps, format="external", external_file=file_list
+                )
             else:
+                file = file_list[0]
                 uncompressed_estimate = Path(file).stat().st_size * 70
                 available_memory = psutil.virtual_memory().available
-                if not chunk_data and uncompressed_estimate >= available_memory:
+                if not chunk_data and not stub_test and uncompressed_estimate >= available_memory:
                     warn(
                         f"Not enough memory (estimated {round(uncompressed_estimate/1e9, 2)} GB) to load movie file as "
                         f"array ({round(available_memory/1e9, 2)} GB available)! Forcing chunk_data to True."
                     )
                     chunk_data = True

-                total_frames = len(timestamps)
-                frame_shape = get_frame_shape(movie_file=file)
-                maxshape = [total_frames]
-                maxshape.extend(frame_shape)
+                with VideoCaptureContext(str(file)) as video_capture_ob:
+                    if stub_test:
+                        video_capture_ob.frame_count = 10
+                    total_frames = video_capture_ob.get_movie_frame_count()
+                    frame_shape = video_capture_ob.get_frame_shape()
+                    timestamps = starting_times[j] + video_capture_ob.get_movie_timestamps()
+                    fps = video_capture_ob.get_movie_fps()
+                maxshape = (total_frames, *frame_shape)
                 best_gzip_chunk = (1, frame_shape[0], frame_shape[1], 3)
                 tqdm_pos, tqdm_mininterval = (0, 10)
                 if chunk_data:
-
-                    def data_generator(file, count_max):
-                        cap = cv2.VideoCapture(str(file))
-                        for _ in range(min(count_max, total_frames)):
-                            success, frame = cap.read()
-                            yield frame
-                        cap.release()
-
-                    mov = DataChunkIterator(
+                    video_capture_ob = VideoCaptureContext(str(file))
+                    if stub_test:
+                        video_capture_ob.frame_count = 10
+                    iterable = DataChunkIterator(
                         data=tqdm(
-                            iterable=data_generator(file=file, count_max=count_max),
+                            iterable=video_capture_ob,
                             desc=f"Copying movie data for {Path(file).name}",
                             position=tqdm_pos,
                             total=total_frames,
                             mininterval=tqdm_mininterval,
                         ),
                         iter_axis=0,  # nwb standard is time as zero axis
-                        maxshape=tuple(maxshape),
+                        maxshape=maxshape,
+                    )
+                    data = H5DataIO(
+                        iterable,
+                        compression=compression,
+                        compression_opts=compression_options,
+                        chunks=best_gzip_chunk,
                     )
-                    image_series_kwargs.update(data=H5DataIO(mov, compression="gzip", chunks=best_gzip_chunk))
                 else:
-                    cap = cv2.VideoCapture(str(file))
-                    mov = []
-                    with tqdm(
-                        desc=f"Reading movie data for {Path(file).name}",
-                        position=tqdm_pos,
-                        total=total_frames,
-                        mininterval=tqdm_mininterval,
-                    ) as pbar:
-                        for _ in range(min(count_max, total_frames)):
-                            success, frame = cap.read()
-                            mov.append(frame)
-                            pbar.update(1)
-                    cap.release()
-                    image_series_kwargs.update(
-                        data=H5DataIO(
-                            DataChunkIterator(
-                                tqdm(
-                                    iterable=np.array(mov),
-                                    desc=f"Writing movie data for {Path(file).name}",
-                                    position=tqdm_pos,
-                                    mininterval=tqdm_mininterval,
-                                ),
-                                iter_axis=0,  # nwb standard is time as zero axis
-                                maxshape=tuple(maxshape),
+                    iterable = np.zeros(shape=maxshape, dtype="uint8")
+                    with VideoCaptureContext(str(file)) as video_capture_ob:
+                        if stub_test:
+                            video_capture_ob.frame_count = 10
+                        with tqdm(
+                            desc=f"Reading movie data for {Path(file).name}",
+                            position=tqdm_pos,
+                            total=total_frames,
+                            mininterval=tqdm_mininterval,
+                        ) as pbar:
+                            for n, frame in enumerate(video_capture_ob):
+                                iterable[n, :, :, :] = frame
+                                pbar.update(1)
+                    data = H5DataIO(
+                        DataChunkIterator(
+                            tqdm(
+                                iterable=iterable,
+                                desc=f"Writing movie data for {Path(file).name}",
+                                position=tqdm_pos,
+                                mininterval=tqdm_mininterval,
                             ),
-                            compression="gzip",
-                            chunks=best_gzip_chunk,
-                        )
+                            iter_axis=0,  # nwb standard is time as zero axis
+                            maxshape=maxshape,
+                        ),
+                        compression=compression,
+                        compression_opts=compression_options,
+                        chunks=best_gzip_chunk,
                     )
+
+                image_series_kwargs.update(data=data)
+                if check_regular_timestamps(ts=timestamps):
+                    image_series_kwargs.update(starting_time=starting_times[j], rate=fps)
+                else:
+                    image_series_kwargs.update(timestamps=timestamps)
+
             if module_name is None:
                 nwbfile.add_acquisition(ImageSeries(**image_series_kwargs))
             else:
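To illustrate how the updated run_conversion options compose, a hypothetical end-to-end sketch (the converter class, movie file names, and output path are illustrative; the option names and their semantics come from the signature and docstring above):

    from datetime import datetime
    from nwb_conversion_tools import NWBConverter, MovieInterface

    class ExampleMovieConverter(NWBConverter):
        data_interface_classes = dict(Movie=MovieInterface)

    converter = ExampleMovieConverter(dict(Movie=dict(file_paths=["sess1.avi", "sess2.avi"])))
    metadata = converter.get_metadata()
    metadata["NWBFile"].update(session_start_time=datetime.now().astimezone().strftime("%Y-%m-%dT%H:%M:%S"))
    converter.run_conversion(
        nwbfile_path="example.nwb",
        metadata=metadata,
        overwrite=True,
        conversion_options=dict(
            Movie=dict(
                starting_times=[0.0, 300.0],  # one float per unique ImageSeries name
                external_mode=False,  # embed frames rather than linking to the files
                chunk_data=True,  # stream frames through a DataChunkIterator
                compression="gzip",
                compression_options=4,  # gzip level, 0-9
            )
        ),
    )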
diff --git a/tests/test_internals/test_movie_interface.py b/tests/test_internals/test_movie_interface.py
index 4de1f0306..824121cea 100644
--- a/tests/test_internals/test_movie_interface.py
+++ b/tests/test_internals/test_movie_interface.py
@@ -1,130 +1,218 @@
+import shutil
+import unittest
+import tempfile
 import numpy as np
-from tempfile import mkdtemp
-from shutil import rmtree
-from pathlib import Path
-from itertools import product
-from datetime import datetime
-
 from pynwb import NWBHDF5IO
-from hdmf.testing import TestCase
-
-from nwb_conversion_tools import (
-    NWBConverter,
-    MovieInterface,
-)
+import os
+from nwb_conversion_tools import NWBConverter, MovieInterface
+from datetime import datetime

 try:
     import cv2

-    HAVE_OPENCV = True
+    skip_test = False
 except ImportError:
-    HAVE_OPENCV = False
-
-
-class TestMovieInterface(TestCase):
-    def setUp(self):
-        self.test_dir = Path(mkdtemp())
-
-    def tearDown(self):
-        rmtree(self.test_dir)
-
-    def test_movie_interface(self):
-        if HAVE_OPENCV:
-            movie_file = self.test_dir / "test1.avi"
-            nwbfile_path = str(self.test_dir / "test1.nwb")
-            (nf, nx, ny) = (50, 640, 480)
-            writer = cv2.VideoWriter(
-                filename=str(movie_file),
-                apiPreference=None,
-                fourcc=cv2.VideoWriter_fourcc("M", "J", "P", "G"),
-                fps=25,
-                frameSize=(ny, nx),
-                params=None,
-            )
-            for k in range(nf):
-                writer.write(np.random.randint(0, 255, (nx, ny, 3)).astype("uint8"))
-            writer.release()
-
-            class MovieTestNWBConverter(NWBConverter):
-                data_interface_classes = dict(Movie=MovieInterface)
-
-            source_data = dict(Movie=dict(file_paths=[movie_file]))
-            converter = MovieTestNWBConverter(source_data)
-            metadata = converter.get_metadata()
-            metadata["NWBFile"].update(session_start_time=datetime.now().astimezone().strftime("%Y-%m-%dT%H:%M:%S"))
-            converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True)
-
-            # This conversion option operates independently of all others
-            converter.run_conversion(
-                metadata=metadata,
-                nwbfile_path=nwbfile_path,
+    skip_test = True
+
+
+@unittest.skipIf(skip_test, "cv2 not installed")
+class TestMovieInterface(unittest.TestCase):
+    def setUp(self) -> None:
+        self.test_dir = tempfile.mkdtemp()
+        self.movie_files = self.create_movies()
+        self.nwb_converter = self.create_movie_converter()
+        self.nwbfile_path = os.path.join(self.test_dir, "movie_test.nwb")
+
+    def create_movies(self):
+        movie_file1 = os.path.join(self.test_dir, "test1.avi")
+        movie_file2 = os.path.join(self.test_dir, "test2.avi")
+        (nf, nx, ny) = (30, 640, 480)
+        writer1 = cv2.VideoWriter(
+            filename=movie_file1,
+            apiPreference=None,
+            fourcc=cv2.VideoWriter_fourcc("M", "J", "P", "G"),
+            fps=25,
+            frameSize=(ny, nx),
+            params=None,
+        )
+        writer2 = cv2.VideoWriter(
+            filename=movie_file2,
+            apiPreference=None,
+            fourcc=cv2.VideoWriter_fourcc("M", "J", "P", "G"),
+            fps=25,
+            frameSize=(ny, nx),
+            params=None,
+        )
+
+        for k in range(nf):
+            writer1.write(np.random.randint(0, 255, (nx, ny, 3)).astype("uint8"))
+            writer2.write(np.random.randint(0, 255, (nx, ny, 3)).astype("uint8"))
+        writer1.release()
+        writer2.release()
+        return [movie_file1, movie_file2]
+
+    def create_movie_converter(self):
+        class MovieTestNWBConverter(NWBConverter):
+            data_interface_classes = dict(Movie=MovieInterface)
+
+        source_data = dict(Movie=dict(file_paths=self.movie_files))
+        return MovieTestNWBConverter(source_data)
+
+    def get_metadata(self):
+        metadata = self.nwb_converter.get_metadata()
+        metadata["NWBFile"].update(session_start_time=datetime.now().astimezone().strftime("%Y-%m-%dT%H:%M:%S"))
+        return metadata
+
+    def test_movie_starting_times(self):
+        starting_times = [np.float(np.random.randint(200)) for i in range(len(self.movie_files))]
+        conversion_opts = dict(Movie=dict(starting_times=starting_times, external_mode=False))
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=self.get_metadata(),
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            metadata = self.nwb_converter.get_metadata()
+            for no in range(len(metadata["Behavior"]["Movies"])):
+                movie_interface_name = metadata["Behavior"]["Movies"][no]["name"]
+                assert movie_interface_name in mod
+                assert starting_times[no] == mod[movie_interface_name].starting_time
+
+    def test_movie_starting_times_none(self):
+        conversion_opts = dict(Movie=dict(external_mode=False))
+        with self.assertRaises(ValueError):
+            self.nwb_converter.run_conversion(
+                nwbfile_path=self.nwbfile_path,
                 overwrite=True,
-                conversion_options=dict(Movie=dict(starting_times=[123.0])),
+                conversion_options=conversion_opts,
+                metadata=self.get_metadata(),
             )

-            # These conversion options do not operate independently, so test them jointly
-            conversion_options_testing_matrix = [
-                dict(Movie=dict(external_mode=False, stub_test=x, chunk_data=y))
-                for x, y in product([True, False], repeat=2)
-            ]
-            for conversion_options in conversion_options_testing_matrix:
-                converter.run_conversion(
-                    metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True, conversion_options=conversion_options
-                )
-
-            module_name = "TestModule"
-            module_description = "This is a test module."
-            nwbfile = converter.run_conversion(metadata=metadata, save_to_file=False)
-
-            # TODO: each of the asserts below here should be broken off into a separate test call
-            # Much of the detail above can be included into either setUp or setUpClass
-            assert f"Video: {Path(movie_file).stem}" in nwbfile.acquisition
-            nwbfile = converter.run_conversion(
-                metadata=metadata,
-                save_to_file=False,
-                nwbfile=nwbfile,
-                conversion_options=dict(Movie=dict(module_name=module_name)),
-            )
-            assert module_name in nwbfile.modules
-            nwbfile = converter.run_conversion(
-                metadata=metadata,
-                save_to_file=False,
-                conversion_options=dict(Movie=dict(module_name=module_name, module_description=module_description)),
+    def test_movie_starting_times_none_duplicate(self):
+        conversion_opts = dict(Movie=dict(external_mode=True))
+        metadata = self.get_metadata()
+        movie_interface_name = metadata["Behavior"]["Movies"][0]["name"]
+        metadata["Behavior"]["Movies"][1]["name"] = movie_interface_name
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=metadata,
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            assert movie_interface_name in mod
+            assert mod[movie_interface_name].starting_time == 0.0
+
+    def test_movie_custom_module(self):
+        starting_times = [np.float(np.random.randint(200)) for i in range(len(self.movie_files))]
+        module_name = "TestModule"
+        module_description = "This is a test module."
+        conversion_opts = dict(
+            Movie=dict(
+                starting_times=starting_times,
+                external_mode=False,
+                module_name=module_name,
+                module_description=module_description,
             )
-            assert module_name in nwbfile.modules and nwbfile.modules[module_name].description == module_description
-
-            metadata.update(
-                Behavior=dict(
-                    Movies=[
-                        dict(
-                            name="CustomName",
-                            description="CustomDescription",
-                            unit="CustomUnit",
-                            resolution=12.3,
-                            comments="CustomComments",
-                        )
-                    ]
-                )
-            )
-            converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True)
-            with NWBHDF5IO(path=nwbfile_path, mode="r") as io:
-                nwbfile = io.read()
-                custom_name = metadata["Behavior"]["Movies"][0]["name"]
-                assert custom_name in nwbfile.acquisition
-                assert metadata["Behavior"]["Movies"][0]["description"] == nwbfile.acquisition[custom_name].description
-                assert metadata["Behavior"]["Movies"][0]["comments"] == nwbfile.acquisition[custom_name].comments
-
-            converter.run_conversion(
-                metadata=metadata,
-                nwbfile_path=nwbfile_path,
+        )
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=self.get_metadata(),
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            assert module_name in nwbfile.processing
+            assert module_description == nwbfile.processing[module_name].description
+
+    def test_movie_chunking(self):
+        starting_times = [np.float(np.random.randint(200)) for i in range(len(self.movie_files))]
+        conv_ops = dict(
+            Movie=dict(external_mode=False, stub_test=True, starting_times=starting_times, chunk_data=False)
+        )
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path, overwrite=True, conversion_options=conv_ops, metadata=self.get_metadata()
+        )
+
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            metadata = self.nwb_converter.get_metadata()
+            for no in range(len(metadata["Behavior"]["Movies"])):
+                movie_interface_name = metadata["Behavior"]["Movies"][no]["name"]
+                assert mod[movie_interface_name].data.chunks is not None  # TODO: retrieve storage_layout of the hdf5 dataset
+
+    def test_movie_external_mode(self):
+        starting_times = [np.float(np.random.randint(200)) for i in range(len(self.movie_files))]
+        conversion_opts = dict(Movie=dict(starting_times=starting_times, external_mode=True))
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=self.get_metadata(),
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            metadata = self.nwb_converter.get_metadata()
+            for no in range(len(metadata["Behavior"]["Movies"])):
+                movie_interface_name = metadata["Behavior"]["Movies"][no]["name"]
+                assert mod[movie_interface_name].external_file[0] == self.movie_files[no]
+
+    def test_movie_duplicate_kwargs_external(self):
+        conversion_opts = dict(Movie=dict(external_mode=True))
+        metadata = self.get_metadata()
+        movie_interface_name = metadata["Behavior"]["Movies"][0]["name"]
+        metadata["Behavior"]["Movies"][1]["name"] = movie_interface_name
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=metadata,
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            assert len(mod) == 1
+            assert movie_interface_name in mod
+            assert len(mod[movie_interface_name].external_file) == 2
+
+    def test_movie_duplicate_kwargs(self):
+        conversion_opts = dict(Movie=dict(external_mode=False))
+        metadata = self.get_metadata()
+        movie_interface_name = metadata["Behavior"]["Movies"][0]["name"]
+        metadata["Behavior"]["Movies"][1]["name"] = movie_interface_name
+        with self.assertRaises(AssertionError):
+            self.nwb_converter.run_conversion(
+                nwbfile_path=self.nwbfile_path,
                 overwrite=True,
-                conversion_options=dict(Movie=dict(external_mode=False, stub_test=True)),
+                conversion_options=conversion_opts,
+                metadata=metadata,
             )
-            with NWBHDF5IO(path=nwbfile_path, mode="r") as io:
-                nwbfile = io.read()
-                custom_name = metadata["Behavior"]["Movies"][0]["name"]
-                assert custom_name in nwbfile.acquisition
-                assert metadata["Behavior"]["Movies"][0]["description"] == nwbfile.acquisition[custom_name].description
-                assert metadata["Behavior"]["Movies"][0]["unit"] == nwbfile.acquisition[custom_name].unit
-                assert metadata["Behavior"]["Movies"][0]["resolution"] == nwbfile.acquisition[custom_name].resolution
-                assert metadata["Behavior"]["Movies"][0]["comments"] == nwbfile.acquisition[custom_name].comments
+
+    def test_movie_stub(self):
+        starting_times = [np.float(np.random.randint(200)) for i in range(len(self.movie_files))]
+        conversion_opts = dict(Movie=dict(starting_times=starting_times, external_mode=False, stub_test=True))
+        self.nwb_converter.run_conversion(
+            nwbfile_path=self.nwbfile_path,
+            overwrite=True,
+            conversion_options=conversion_opts,
+            metadata=self.get_metadata(),
+        )
+        with NWBHDF5IO(path=self.nwbfile_path, mode="r") as io:
+            nwbfile = io.read()
+            mod = nwbfile.acquisition
+            metadata = self.nwb_converter.get_metadata()
+            for no in range(len(metadata["Behavior"]["Movies"])):
+                movie_interface_name = metadata["Behavior"]["Movies"][no]["name"]
+                assert mod[movie_interface_name].data.shape[0] == 10
+
+    def tearDown(self) -> None:
+        shutil.rmtree(self.test_dir)
+        del self.nwb_converter
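Outside the test suite, the chunk layout and compression these options produce can be spot-checked directly with h5py. A hedged sketch, assuming the output path used in setUp above and the interface's default ImageSeries name for a file called test1.avi (the old test asserted the "Video: <stem>" naming pattern):

    import h5py

    with h5py.File("movie_test.nwb", "r") as f:
        dset = f["acquisition/Video: test1/data"]
        print(dset.shape)        # (n_frames, height, width, 3)
        print(dset.chunks)       # e.g. (1, 640, 480, 3), the best_gzip_chunk shape
        print(dset.compression)  # "gzip" with the default compression setting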