from pathlib import Path
from typing import Union

from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import pynwb
from ipywidgets import widgets, Layout
from pynwb.image import GrayscaleImage, ImageSeries, RGBImage

from .base import fig2widget
from .utils.cmaps import linear_transfer_function
from .utils.imageseries import get_frame_count, get_frame, get_fps
from .utils.timeseries import (
    get_timeseries_maxt,
    get_timeseries_mint,
    timeseries_time_to_ind,
)
from .controllers.time_window_controllers import StartAndDurationController

PathType = Union[str, Path]


class ImageSeriesWidget(widgets.VBox):
    """Widget showing one frame of an ImageSeries, driven by a time slider.

    Three data layouts are supported:

    - ``external_file`` (video or tif files): frames are read lazily from
      disk; a Dropdown file selector appears when there is more than one file;
    - in-memory 3D data (frames x height x width): shown as a plotly image;
    - in-memory 4D data (frames x X x Y x Z): shown as an ipyvolume volume.
    """

    def __init__(
        self,
        imageseries: ImageSeries,
        foreign_start_duration_controller: StartAndDurationController = None,
        neurodata_vis_spec: dict = None,
    ):
        """
        Parameters
        ----------
        imageseries: ImageSeries
            The series to display.
        foreign_start_duration_controller: StartAndDurationController, optional
            External (start, duration) controller; its start value is linked
            to this widget's internal time slider.
        neurodata_vis_spec: dict, optional
            Unused here; accepted for nwbwidgets dispatch compatibility.
        """
        super().__init__()
        self.imageseries = imageseries
        self.figure = None
        self.time_slider = None
        self.external_file = imageseries.external_file
        self.file_selector = None
        self.video_start_times = []
        # NOTE(review): assumes rate or timestamps is populated — verify for
        # external-file series written without a rate.
        self.fps = self.get_fps()
        self.external_files = []

        if imageseries.external_file is not None:
            self.external_files = list(self.imageseries.external_file)
            self.video_start_times = self._get_video_start_times()
            # The slider initially spans only the first file; switching files
            # via the dropdown re-ranges it in _update_time_slider.
            self.time_slider = widgets.FloatSlider(
                min=self.video_start_times[0],
                max=self.video_start_times[1] - 1 / self.fps,
                orientation="horizontal",
                description="time(s)",
                continuous_update=False,
            )
            self.external_file = imageseries.external_file[0]
            # set file selector:
            if len(imageseries.external_file) > 1:
                self.file_selector = widgets.Dropdown(options=imageseries.external_file)
                self.external_file = self.file_selector.value
                self.file_selector.observe(self._update_time_slider, names="value")

            self.time_slider.observe(self._time_slider_callback_external, names="value")
            self._set_figure_from_time(
                imageseries.starting_time,
                imageseries.starting_time,
                self.external_file,
            )
        else:
            tmin = get_timeseries_mint(imageseries)
            tmax = get_timeseries_maxt(imageseries)
            self.time_slider = widgets.FloatSlider(
                value=tmin,
                min=tmin,
                max=tmax,
                orientation="horizontal",
                description="time(s)",
                continuous_update=False,
            )
            if len(imageseries.data.shape) == 3:
                self._set_figure_from_frame(0)
                self.time_slider.observe(self._time_slider_callback_2d, names="value")
            elif len(imageseries.data.shape) == 4:
                self._set_figure_3d(0)
                self.time_slider.observe(self._time_slider_callback_3d, names="value")
            else:
                raise NotImplementedError

        # set visible time slider:
        if foreign_start_duration_controller is None:
            self.visible_time_slider = self.time_slider
        else:
            self.visible_time_slider = foreign_start_duration_controller

            # link the controller's start (value[0]) to the internal slider;
            # traitlets clamps it into the slider's [min, max] range
            def _link_time_slider(change):
                self.time_slider.value = change["new"][0]

            self.visible_time_slider.observe(_link_time_slider, names="value")
        self.children = self.get_children(self.file_selector)

    def _get_video_start_times(self):
        """Return cumulative start times: [t0, t0+dur0, t0+dur0+dur1, ...].

        The last element is the end time of the final file.
        """
        if self.external_file is not None:
            start_times = [self.imageseries.starting_time]
            for file in tqdm(
                self.imageseries.external_file,
                desc="retrieving video start times",
            ):
                file_time_duration = get_frame_count(file) / self.fps
                start_times.append(file_time_duration)
            return np.cumsum(start_times)

    def _time_slider_callback_2d(self, change):
        # BUGFIX: change["new"] is the slider's new float value (not a tuple),
        # and _set_figure_from_time requires a starting_time argument; convert
        # the time directly to a frame index instead.
        self._set_figure_from_frame(self.time_to_index(change["new"]))

    def _time_slider_callback_3d(self, change):
        # BUGFIX: same as the 2d callback — change["new"] is a plain float.
        frame_number = self.time_to_index(change["new"])
        self._set_figure_3d(frame_number)

    def _time_slider_callback_external(self, change):
        time = change["new"]
        # the slider's min is the start time of the currently selected file
        starting_time = change["owner"].min
        self._set_figure_from_time(time, starting_time, self.external_file)

    def _update_time_slider(self, value):
        """Dropdown callback: re-range the time slider to the selected file."""
        path_ext_file = value["new"]
        self.external_file = path_ext_file
        idx = self.external_files.index(self.external_file)
        tmin = self.video_start_times[idx]
        tmax = self.video_start_times[idx + 1]
        tmax_ = tmax - 1 / self.fps
        tmax = tmax_ if tmax_ > tmin else tmax
        # order of setting min/max matters: traitlets validates max >= min,
        # so pick the order that keeps the intermediate state valid
        if tmax < self.time_slider.min:
            self.time_slider.min = tmin
            self.time_slider.max = tmax
        else:
            self.time_slider.max = tmax
            self.time_slider.min = tmin
        self._set_figure_from_frame(0, self.external_file)

    def _set_figure_3d(self, frame_number):
        """Render one 4D frame as an ipyvolume volume into an Output widget."""
        import ipyvolume.pylab as p3

        output = widgets.Output()
        p3.figure()
        p3.volshow(
            self.imageseries.data[frame_number].transpose([1, 0, 2]),
            tf=linear_transfer_function([0, 0, 0], max_opacity=0.3),
        )
        output.clear_output(wait=True)
        self.figure = output
        with output:
            p3.show()

    def _set_figure_from_time(self, time, starting_time, ext_file_path=None):
        frame_number = self.time_to_index(time, starting_time)
        self._set_figure_from_frame(frame_number, ext_file_path)

    def _set_figure_from_frame(self, frame_number, ext_file_path=None):
        """Show frame ``frame_number`` (from memory or from ``ext_file_path``)."""
        data = self.get_frame(frame_number, ext_file_path)
        img = px.imshow(data, binary_string=True)
        if self.figure is None:
            self.figure = go.FigureWidget(img)
        else:
            # update the existing trace in place instead of rebuilding the figure
            self.figure.for_each_trace(lambda trace: trace.update(img.data[0]))
        self.figure.layout.title = f"Frame no: {frame_number}"

    def get_fps(self):
        """Frames per second of the series.

        Uses ``rate`` when present; otherwise derives fps from the first two
        timestamps.
        """
        if self.imageseries.rate is None:
            # BUGFIX: the timestamp difference is the sampling *period*;
            # fps is its reciprocal.
            period = self.imageseries.timestamps[1] - self.imageseries.timestamps[0]
            fps = 1.0 / period
        else:
            fps = self.imageseries.rate
        return fps

    def time_to_index(self, time, starting_time=None):
        """Convert a time (s) to a frame index, relative to ``starting_time``."""
        starting_time = (
            starting_time
            if starting_time is not None
            else self.imageseries.starting_time
        )
        if self.imageseries.external_file:
            return int((time - starting_time) * self.fps)
        else:
            return timeseries_time_to_ind(self.imageseries, time)

    def get_children(self, *extra_widgets):
        """Return the VBox children; ``extra_widgets`` entries may be None."""
        # renamed parameter from *widgets to avoid shadowing the ipywidgets import
        present = [wid for wid in extra_widgets if wid is not None]
        return [self.figure, self.visible_time_slider, *present]

    def get_frame(self, idx, ext_file_path=None):
        """Return frame ``idx`` from an external file, or from in-memory data."""
        if ext_file_path is not None:
            return get_frame(ext_file_path, idx)
        else:
            return self.imageseries.data[idx].T
class TwoPhotonSeriesWidget(ImageSeriesWidget):
    """Widget showing Image stack recorded over time from 2-photon microscope."""

    def __init__(
        self, indexed_timeseries: TwoPhotonSeries, neurodata_vis_spec: dict = None
    ):
        # BUGFIX: pass neurodata_vis_spec by keyword. Passed positionally it
        # lands in ImageSeriesWidget's foreign_start_duration_controller slot,
        # and a non-None spec dict would later fail on .observe().
        super().__init__(indexed_timeseries, neurodata_vis_spec=neurodata_vis_spec)
try:
    import cv2

    HAVE_OPENCV = True
except ImportError:
    HAVE_OPENCV = False

try:
    from tifffile import imread, TiffFile

    HAVE_TIF = True
except ImportError:
    HAVE_TIF = False

PathType = Union[str, Path]

VIDEO_EXTENSIONS = [".mp4", ".avi", ".wmv", ".mov", ".flv"]
TIF_EXTENSIONS = [".tif", ".tiff"]


class VideoCaptureContext:
    """Context manager that releases an opencv VideoCapture on exit."""

    def __init__(self, video_path):
        self.vc = cv2.VideoCapture(video_path)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.vc.release()


def get_frame_shape(external_path_file: PathType):
    """Get the shape of one frame.

    Parameters
    ----------
    external_path_file: PathType
        path of external file from the external_file argument of ImageSeries

    Raises
    ------
    NotImplementedError
        If the file extension is neither tif nor a supported video format.
    """
    external_path_file = Path(external_path_file)
    if external_path_file.suffix in TIF_EXTENSIONS:
        return get_frame_shape_tif(external_path_file)
    elif external_path_file.suffix in VIDEO_EXTENSIONS:
        return get_frame_shape_video(external_path_file)
    else:
        raise NotImplementedError


def get_frame_count(external_path_file: PathType):
    """Get number of frames in the video or tif stack.

    Parameters
    ----------
    external_path_file: PathType
        path of external file from the external_file argument of ImageSeries
    """
    external_path_file = Path(external_path_file)
    if external_path_file.suffix in TIF_EXTENSIONS:
        return get_frame_count_tif(external_path_file)
    elif external_path_file.suffix in VIDEO_EXTENSIONS:
        return get_frame_count_video(external_path_file)
    else:
        raise NotImplementedError


def get_frame(external_path_file: PathType, index):
    """Get one frame.

    Parameters
    ----------
    external_path_file: PathType
        path of external file from the external_file argument of ImageSeries
    index: int
        the frame number to retrieve from the video/tif file
    """
    external_path_file = Path(external_path_file)
    if external_path_file.suffix in TIF_EXTENSIONS:
        return get_frame_tif(external_path_file, index)
    elif external_path_file.suffix in VIDEO_EXTENSIONS:
        return get_frame_video(external_path_file, index)
    else:
        raise NotImplementedError


def get_fps(external_path_file: PathType):
    """Get frames per second of the file; None when the format has no fps."""
    external_path_file = Path(external_path_file)
    if external_path_file.suffix in TIF_EXTENSIONS:
        return get_fps_tif(external_path_file)
    elif external_path_file.suffix in VIDEO_EXTENSIONS:
        return get_fps_video(external_path_file)
    else:
        raise NotImplementedError


def get_frame_tif(external_path_file: PathType, index):
    """Read one page of a tif stack as an array."""
    external_path_file = Path(external_path_file)
    assert external_path_file.suffix in TIF_EXTENSIONS, "supply a tif file"
    assert HAVE_TIF, "pip install tifffile"
    return imread(str(external_path_file), key=int(index))


def get_frame_shape_tif(external_path_file: PathType):
    """Shape of the first page of a tif stack."""
    external_path_file = Path(external_path_file)
    assert external_path_file.suffix in TIF_EXTENSIONS, "supply a tif file"
    assert HAVE_TIF, "pip install tifffile"
    tif = TiffFile(external_path_file)
    page = tif.pages[0]
    return page.shape


def get_frame_count_tif(external_path_file: PathType):
    """Number of pages in a tif stack."""
    external_path_file = Path(external_path_file)
    assert external_path_file.suffix in TIF_EXTENSIONS, "supply a tif file"
    assert HAVE_TIF, "pip install tifffile"
    tif = TiffFile(external_path_file)
    return len(tif.pages)


def get_fps_tif(external_path_file: PathType):
    # tif stacks carry no frame-rate metadata; callers must fall back to the
    # ImageSeries rate/timestamps.
    return


def get_frame_video(external_path_file: PathType, index):
    """Read frame ``index`` from a video file via opencv.

    Note: opencv returns frames in BGR channel order.
    """
    external_path_file = Path(external_path_file)
    assert (
        external_path_file.suffix in VIDEO_EXTENSIONS
    ), f"supply any of {VIDEO_EXTENSIONS} files"
    assert HAVE_OPENCV, "pip install opencv-python"
    no_frames = get_frame_count(external_path_file)
    assert index < no_frames, f"enter index < {no_frames}"
    # property name moved between opencv 2.x and 3.x+
    if int(cv2.__version__.split(".")[0]) < 3:
        set_arg = cv2.cv.CV_CAP_PROP_POS_FRAMES
    else:
        set_arg = cv2.CAP_PROP_POS_FRAMES
    with VideoCaptureContext(str(external_path_file)) as cap:
        cap.vc.set(set_arg, index)
        success, frame = cap.vc.read()
    if success:
        return frame
    raise Exception(f"could not read frame {index} from {external_path_file}")


def get_frame_count_video(external_path_file: PathType):
    """Number of frames in a video file."""
    external_path_file = Path(external_path_file)
    assert (
        external_path_file.suffix in VIDEO_EXTENSIONS
    ), f"supply any of {VIDEO_EXTENSIONS} files"
    assert HAVE_OPENCV, "pip install opencv-python"
    if int(cv2.__version__.split(".")[0]) < 3:
        frame_count_arg = cv2.cv.CV_CAP_PROP_FRAME_COUNT
    else:
        frame_count_arg = cv2.CAP_PROP_FRAME_COUNT
    with VideoCaptureContext(str(external_path_file)) as cap:
        # VideoCapture.get returns a float; a frame count is integral
        return int(cap.vc.get(frame_count_arg))


def get_frame_shape_video(external_path_file: PathType):
    """Shape of the first frame of a video file."""
    external_path_file = Path(external_path_file)
    assert (
        external_path_file.suffix in VIDEO_EXTENSIONS
    ), f"supply any of {VIDEO_EXTENSIONS} files"
    assert HAVE_OPENCV, "pip install opencv-python"
    with VideoCaptureContext(str(external_path_file)) as cap:
        success, frame = cap.vc.read()
    if success:
        return frame.shape
    raise Exception(f"could not open video file {external_path_file}")
def get_fps_video(external_path_file: PathType):
    """Frames per second stored in a video file's metadata (a float)."""
    external_path_file = Path(external_path_file)
    assert (
        external_path_file.suffix in VIDEO_EXTENSIONS
    ), f"supply any of {VIDEO_EXTENSIONS} files"
    assert HAVE_OPENCV, "pip install opencv-python"
    if int(cv2.__version__.split(".")[0]) < 3:
        fps_arg = cv2.cv.CV_CAP_PROP_FPS
    else:
        fps_arg = cv2.CAP_PROP_FPS
    with VideoCaptureContext(str(external_path_file)) as cap:
        return cap.vc.get(fps_arg)


# ---- test/fixtures_imageseries.py ----
import cv2
import numpy as np
import pytest
from tifffile import imwrite


@pytest.fixture(scope="session")
def movie_fps():
    """Frame rate written into the generated movie files."""
    return 10.0


@pytest.fixture(scope="session")
def movie_shape():
    # (height, width, channels) of every generated frame
    return (30, 40, 3)


@pytest.fixture(scope="session")
def movie_no_frames():
    # frame counts of the two generated movies/stacks
    return 10, 15


@pytest.fixture(scope="session")
def create_frames(movie_no_frames, movie_shape):
    """Two random uint8 arrays of shape (h, w, 3, n_frames)."""
    mov_ar1 = np.random.randint(
        0, 255, size=[*movie_shape, movie_no_frames[0]], dtype="uint8"
    )
    mov_ar2 = np.random.randint(
        0, 255, size=[*movie_shape, movie_no_frames[1]], dtype="uint8"
    )
    return mov_ar1, mov_ar2


@pytest.fixture(scope="session")
def create_movie_files(tmp_path_factory, create_frames, movie_fps):
    """Write the frame arrays to two .avi files; return their paths."""
    base_path = tmp_path_factory.mktemp("moviefiles")
    mov_ar1_path = base_path / "movie1.avi"
    mov_ar2_path = base_path / "movie.avi"
    mov_array1, mov_array2 = create_frames
    # VideoWriter expects frameSize as (width, height); renamed local so it
    # no longer shadows the movie_shape fixture
    frame_size = mov_array1.shape[1::-1]
    for path, array in ((mov_ar1_path, mov_array1), (mov_ar2_path, mov_array2)):
        writer = cv2.VideoWriter(
            filename=str(path),
            apiPreference=None,
            fourcc=cv2.VideoWriter_fourcc("M", "J", "P", "G"),
            fps=movie_fps,
            frameSize=frame_size,
            params=None,
        )
        for frame_no in range(array.shape[-1]):
            writer.write(array[:, :, :, frame_no])
        writer.release()
    return mov_ar1_path, mov_ar2_path


@pytest.fixture(scope="session")
def create_tif_files(tmp_path_factory, create_frames):
    """Write the frame arrays to two tif stacks; return their paths."""
    base_path = tmp_path_factory.mktemp("tiffiles")
    tif_path1 = base_path / "tif_image1.tif"
    tif_path2 = base_path / "tif_image2.tif"
    # tifffile expects (frames, h, w, channels)
    imwrite(str(tif_path1), create_frames[0].transpose([3, 0, 1, 2]), photometric="rgb")
    imwrite(str(tif_path2), create_frames[1].transpose([3, 0, 1, 2]), photometric="rgb")
    return tif_path1, tif_path2
def test_show_image_series():
    data = np.random.rand(800).reshape((8, 10, 10))
    image_series = ImageSeries(name="Image Series", data=data, rate=1.0, unit="n.a.")

    assert isinstance(
        show_image_series(image_series, default_neurodata_vis_spec), widgets.Widget
    )


def test_image_series_widget_data_2d():
    """3D in-memory data (frames x h x w) renders as a plotly figure."""
    series = ImageSeries(
        name="Image Series",
        data=np.random.randint(0, 255, size=[10, 30, 40]),
        rate=1.0,
        unit="n.a.",
    )
    widget = ImageSeriesWidget(series)
    assert isinstance(widget.figure, go.FigureWidget)
    assert widget.time_slider.min == 0.0
    assert widget.time_slider.max == 9.0


def test_image_series_widget_data_3d():
    """4D in-memory data renders through ipyvolume into an Output widget."""
    series = ImageSeries(
        name="Image Series",
        data=np.random.randint(0, 255, size=[10, 30, 40, 5]),
        rate=1.0,
        unit="n.a.",
    )
    widget = ImageSeriesWidget(series)
    assert isinstance(widget.figure, widgets.Output)
    assert widget.time_slider.min == 0.0
    assert widget.time_slider.max == 9.0


def test_image_series_widget_external_file_tif(create_tif_files, movie_no_frames):
    rate = 1.0
    series = ImageSeries(
        name="Image Series", external_file=create_tif_files, rate=rate, unit="n.a."
    )
    widget = ImageSeriesWidget(series)
    assert isinstance(widget.figure, go.FigureWidget)
    # slider initially covers only the first file
    assert widget.time_slider.max == movie_no_frames[0] / rate - 1 / rate
    assert widget.time_slider.min == 0.0
    assert widget.file_selector.value == create_tif_files[0]
    # selecting the second file re-ranges the slider to its span
    widget.file_selector.value = create_tif_files[1]
    assert widget.time_slider.min == movie_no_frames[0] / rate
    total_duration = sum(count / rate for count in movie_no_frames)
    assert widget.time_slider.max == total_duration - 1 / rate


def test_image_series_widget_external_file_single(create_tif_files, movie_no_frames):
    rate = 1.0
    series = ImageSeries(
        name="Image Series", external_file=create_tif_files[:1], rate=rate, unit="n.a."
    )
    widget = ImageSeriesWidget(series)
    assert isinstance(widget.figure, go.FigureWidget)
    assert widget.time_slider.max == movie_no_frames[0] / rate - 1 / rate
    assert widget.time_slider.min == 0.0
    # a single external file gets no dropdown
    assert widget.file_selector is None


def test_image_series_widget_external_file_video(
    create_movie_files, movie_no_frames, movie_fps
):
    series = ImageSeries(
        name="Image Series",
        external_file=create_movie_files,
        unit="n.a.",
        rate=movie_fps,
    )
    widget = ImageSeriesWidget(series)
    assert isinstance(widget.figure, go.FigureWidget)
    first_file_end = movie_no_frames[0] / movie_fps - 1 / movie_fps
    assert widget.time_slider.max == first_file_end
    assert widget.time_slider.min == 0.0
    assert widget.file_selector.value == create_movie_files[0]
    widget.file_selector.value = create_movie_files[1]
    total_duration = sum(count / movie_fps for count in movie_no_frames)
    assert widget.time_slider.max == total_duration - 1 / movie_fps
    # switching back restores the first file's range
    widget.file_selector.value = create_movie_files[0]
    assert widget.time_slider.max == first_file_end
    assert widget.time_slider.min == 0.0


def test_image_series_foreign_time_controller(
    create_movie_files, movie_no_frames, movie_fps
):
    controller = StartAndDurationController(tmax=20.0, tmin=0)
    series = ImageSeries(
        name="Image Series",
        external_file=create_movie_files,
        unit="n.a.",
        rate=movie_fps,
    )
    widget = ImageSeriesWidget(series, controller)
    first_file_end = movie_no_frames[0] / movie_fps - 1 / movie_fps
    assert widget.time_slider.max == first_file_end
    # a start beyond the slider range is clamped to the slider's max
    controller.value = (5.0, 20.0)
    assert widget.time_slider.value == first_file_end


# ---- test/test_ophys.py (updated method of the ophys widget test class) ----
    def test_show_two_photon_series(self):
        wid = TwoPhotonSeriesWidget(self.image_series)
        assert isinstance(wid, widgets.Widget)
        wid.time_slider.value = 50.0


# ---- test/test_utils_imageseries.py ----
from nwbwidgets.utils.imageseries import get_frame_count, get_frame, get_frame_shape
from .fixtures_imageseries import *


def test_movie_frame(create_movie_files, movie_shape):
    frame = get_frame(create_movie_files[0], 0)
    assert frame.shape == movie_shape


def test_tif_frame(create_tif_files, movie_shape):
    frame = get_frame(str(create_tif_files[0]), 0)
    assert frame.shape == movie_shape


def test_movie_no_frames(create_movie_files, movie_no_frames):
    assert get_frame_count(create_movie_files[0]) == movie_no_frames[0]


def test_tif_no_frames(create_tif_files, movie_no_frames):
    assert get_frame_count(create_tif_files[0]) == movie_no_frames[0]


def test_movie_frame_shape(create_movie_files, movie_shape):
    assert get_frame_shape(create_movie_files[0]) == movie_shape


def test_tif_frame_shape(create_tif_files, movie_shape):
    assert get_frame_shape(create_tif_files[0]) == movie_shape