diff --git a/auto_editor/subcommands/levels.py b/auto_editor/subcommands/levels.py
index cede6fb2b..f1eae4505 100644
--- a/auto_editor/subcommands/levels.py
+++ b/auto_editor/subcommands/levels.py
@@ -59,7 +59,7 @@ def levels_options(parser: ArgumentParser) -> ArgumentParser:
 def print_arr(arr: NDArray) -> None:
     print("")
     print("@start")
-    if arr.dtype == np.float64:
+    if arr.dtype in (np.float64, np.float32, np.float16):
         for a in arr:
             sys.stdout.write(f"{a:.20f}\n")
     elif arr.dtype == np.bool_:
@@ -131,18 +131,49 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     levels = Levels(src, tb, bar, False, log, strict=True)
     try:
         if method == "audio":
-            container = av.open(src.path, "r")
-            audio_stream = container.streams.audio[obj["stream"]]
-            log.experimental(audio_stream.codec)
-            print_arr_gen(iter_audio(audio_stream, tb))
-            container.close()
+            if (arr := levels.read_cache("audio", (obj["stream"],))) is not None:
+                print_arr(arr)
+            else:
+                container = av.open(src.path, "r")
+                audio_stream = container.streams.audio[obj["stream"]]
+                log.experimental(audio_stream.codec)
+
+                values = []
+
+                def value_storing_generator() -> Iterator[np.float32]:
+                    for value in iter_audio(audio_stream, tb):
+                        values.append(value)
+                        yield value
+
+                print_arr_gen(value_storing_generator())
+                container.close()
+
+                cache_array = np.array(values, dtype=np.float32)
+                levels.cache(cache_array, "audio", (obj["stream"],))
         elif method == "motion":
-            container = av.open(src.path, "r")
-            video_stream = container.streams.video[obj["stream"]]
-            log.experimental(video_stream.codec)
-            print_arr_gen(iter_motion(video_stream, tb, obj["blur"], obj["width"]))
-            container.close()
+            mobj = (obj["stream"], obj["width"], obj["blur"])
+            if (arr := levels.read_cache("motion", mobj)) is not None:
+                print_arr(arr)
+            else:
+                container = av.open(src.path, "r")
+                video_stream = container.streams.video[obj["stream"]]
+                log.experimental(video_stream.codec)
+
+                values = []
+
+                def value_storing_generator() -> Iterator[np.float32]:
+                    for value in iter_motion(
+                        video_stream, tb, obj["blur"], obj["width"]
+                    ):
+                        values.append(value)
+                        yield value
+
+                print_arr_gen(value_storing_generator())
+                container.close()
+
+                cache_array = np.array(values, dtype=np.float32)
+                levels.cache(cache_array, "motion", mobj)
         elif method == "subtitle":
             print_arr(levels.subtitle(**obj))
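
The change above streams levels to stdout through a "value-storing generator" so the same pass can also populate the cache: each value yielded by iter_audio/iter_motion is appended to a list while the consumer prints it, and the list is converted to a float32 array for levels.cache() once the stream is drained. Below is a minimal standalone sketch of that pattern, assuming hypothetical stand-ins (compute_values for iter_audio/iter_motion, consume for print_arr_gen); it is not auto-editor's API.

    from typing import Iterator

    import numpy as np


    def compute_values() -> Iterator[np.float32]:
        # Stand-in for iter_audio / iter_motion: yields one level per tick.
        for i in range(4):
            yield np.float32(i) / 4


    def consume(stream: Iterator[np.float32]) -> None:
        # Stand-in for print_arr_gen: drains the generator, printing as it goes.
        for value in stream:
            print(f"{value:.20f}")


    # Tee the streamed values into a list so they can be cached afterwards.
    values: list[np.float32] = []


    def value_storing_generator() -> Iterator[np.float32]:
        for value in compute_values():
            values.append(value)
            yield value


    consume(value_storing_generator())
    cache_array = np.array(values, dtype=np.float32)  # what the cache would store
    print(cache_array.dtype, cache_array.shape)

Because the generator only buffers the scalar levels (not frames or audio samples), the extra memory cost of caching stays proportional to the clip length in timebase units.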