diff --git a/capture_calibrated.yml b/capture_calibrated.yml
index 14887ce..814405a 100644
--- a/capture_calibrated.yml
+++ b/capture_calibrated.yml
@@ -1,35 +1,24 @@
input:
- type: WebcamInput
- device: {{ webcam-device }}
-
-decode:
- type: DualFrameRawDecoder
- input: ,
+# #[derive(serde::Deserialize)]
+# pub struct PerHalfWeights {
+# #[serde(default)]
+# green_diff_weights: Vec<[f32; 2]>,
+
+# // index -> lags
+# // 0, 1, 2, 3, 4 -> 0, -1, 1, -2, 2
+# #[serde(default)]
+# dark_col_row_weights: Vec<[[f32; 2 * NUM_DARKCOLS]; 2]>,
+# #[serde(default)]
+# offset: f32,
+# }
+
+# #[derive(serde::Deserialize)]
+# pub struct RowNoiseRemovalModel {
+# weights_odd: PerHalfWeights,
+# weights_even: PerHalfWeights,
+# }
+
+
+
+weights_even:
+ dark_col_row_weights:
+ - - [0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375]
+ - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+weights_odd:
+ dark_col_row_weights:
+ - - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ - [0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375]
diff --git a/src/nodes.rs b/src/nodes.rs
index 0677bb4..f35dc12 100644
--- a/src/nodes.rs
+++ b/src/nodes.rs
@@ -11,13 +11,14 @@ use crate::{
bitdepth_convert::BitDepthConverter,
dual_frame_raw_decoder::{DualFrameRawDecoder, ReverseDualFrameRawDecoder},
fp_to_uint::Fp32ToUInt16,
+ row_noise_removal::RowNoiseRemoval,
sz3::SZ3Compress,
zstd::ZstdBlobReader,
},
nodes_gpu::{
bitdepth_convert::GpuBitDepthConverter,
- calibrate::Calibrate,
color_voodoo::ColorVoodoo,
+ darkframe_subtract::DarkframeSubtract,
debayer::Debayer,
debayer_resolution_loss::DebayerResolutionLoss,
histogram::Histogram,
@@ -91,7 +92,8 @@ generate_dynamic_node_creation_functions![
Split,
SZ3Compress,
ZstdBlobReader,
- Calibrate,
+ DarkframeSubtract,
+ RowNoiseRemoval,
Histogram,
#[cfg(target_os = "linux")]
Plot,
diff --git a/src/nodes_cpu/bitdepth_convert.rs b/src/nodes_cpu/bitdepth_convert.rs
index d7acae1..4e3995b 100644
--- a/src/nodes_cpu/bitdepth_convert.rs
+++ b/src/nodes_cpu/bitdepth_convert.rs
@@ -16,11 +16,14 @@ use async_trait::async_trait;
pub struct BitDepthConverter {
input: InputProcessingNode,
+ target_bitdepth: u64,
context: ProcessingContext,
}
impl Parameterizable for BitDepthConverter {
fn describe_parameters() -> ParametersDescriptor {
- ParametersDescriptor::new().with("input", Mandatory(NodeInputParameter))
+ ParametersDescriptor::new()
+ .with("input", Mandatory(NodeInputParameter))
+ .with("to", WithDefault(U8(), IntRangeValue(8)))
}
fn from_parameters(
@@ -28,7 +31,11 @@ impl Parameterizable for BitDepthConverter {
_is_input_to: &[NodeID],
context: &ProcessingContext,
    ) -> Result<Self> {
- Ok(Self { input: parameters.take("input")?, context: context.clone() })
+ Ok(Self {
+ input: parameters.take("input")?,
+ context: context.clone(),
+ target_bitdepth: parameters.take("to")?,
+ })
}
}
@@ -40,12 +47,12 @@ impl ProcessingNode for BitDepthConverter {
.context
            .ensure_cpu_buffer::<Raw>(&input)
.context("Wrong input format for BitDepthConverter")?;
- let interp = Raw { bit_depth: 8, ..frame.interp };
+ let interp = Raw { bit_depth: self.target_bitdepth, ..frame.interp };
let mut new_buffer = unsafe { self.context.get_uninit_cpu_buffer(interp.required_bytes()) };
- if frame.interp.bit_depth == 8 {
+ if frame.interp.bit_depth == self.target_bitdepth {
return Ok(input);
- } else if frame.interp.bit_depth == 12 {
+ } else if (frame.interp.bit_depth == 12) && (self.target_bitdepth == 8) {
new_buffer.as_mut_slice(|new_buffer| {
frame.storage.as_slice(|frame_storage| {
for (input, output) in
@@ -56,7 +63,28 @@ impl ProcessingNode for BitDepthConverter {
}
})
});
+ } else if (frame.interp.bit_depth == 12) && (self.target_bitdepth == 16) {
+ new_buffer.as_mut_slice(|new_buffer| {
+ frame.storage.as_slice(|frame_storage| {
+ let new_buffer: &mut [u16] = bytemuck::cast_slice_mut(new_buffer);
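+                // the 12 bit data is packed big-endian style: every 3 bytes hold two samples,
+                // [aaaa_aaaa][aaaa_bbbb][bbbb_bbbb]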
+ for (input, output) in
+ frame_storage.chunks_exact(3).zip(new_buffer.chunks_exact_mut(2))
+ {
+ let a: u16 = ((input[0] as u16) << 4) | ((input[1] >> 4) as u16);
+ let b: u16 = (((input[1] & 0xf) as u16) << 8) | input[2] as u16;
+
+ output[0] = a;
+ output[1] = b;
+ }
+ })
+ });
} else {
+ println!(
+ "using unoptimized bitdepth conversion path from {} to {}",
+ frame.interp.bit_depth, self.target_bitdepth
+ );
+ assert_eq!(self.target_bitdepth, 8);
+
let mut rest_value: u32 = 0;
let mut rest_bits: u32 = 0;
let mut pos = 0;
diff --git a/src/nodes_cpu/mod.rs b/src/nodes_cpu/mod.rs
index 36c605b..fa75c80 100644
--- a/src/nodes_cpu/mod.rs
+++ b/src/nodes_cpu/mod.rs
@@ -3,5 +3,6 @@ pub mod benchmark_sink;
pub mod bitdepth_convert;
pub mod dual_frame_raw_decoder;
pub mod fp_to_uint;
+pub mod row_noise_removal;
pub mod sz3;
pub mod zstd;
diff --git a/src/nodes_cpu/rgb_to_rgba.rs b/src/nodes_cpu/rgb_to_rgba.rs
new file mode 100644
index 0000000..d15489c
--- /dev/null
+++ b/src/nodes_cpu/rgb_to_rgba.rs
@@ -0,0 +1,66 @@
+use crate::pipeline_processing::{
+ node::InputProcessingNode,
+ parametrizable::{Parameterizable, Parameters, ParametersDescriptor},
+ payload::Payload,
+};
+use anyhow::{Context, Result};
+
+
+use crate::pipeline_processing::{
+    frame::{Frame, FrameInterpretation, Rgb, Rgba},
+ node::{Caps, NodeID, ProcessingNode, Request},
+ parametrizable::prelude::*,
+ processing_context::ProcessingContext,
+};
+use async_trait::async_trait;
+
+pub struct RgbToRgbaToFlutter {
+    input: InputProcessingNode,
+    context: ProcessingContext,
+}
+impl Parameterizable for RgbToRgbaToFlutter {
+    fn describe_parameters() -> ParametersDescriptor {
+        ParametersDescriptor::new().with("input", Mandatory(NodeInputParameter))
+    }
+
+    fn from_parameters(
+        mut parameters: Parameters,
+        _is_input_to: &[NodeID],
+        context: &ProcessingContext,
+    ) -> Result<Self> {
+        Ok(Self { input: parameters.take("input")?, context: context.clone() })
+    }
+}
+
+#[async_trait]
+impl ProcessingNode for RgbToRgbaToFlutter {
+    async fn pull(&self, request: Request) -> Result<Payload> {
+        let input = self.input.pull(request).await?;
+        let frame = self
+            .context
+            .ensure_cpu_buffer::<Rgb>(&input)
+            .context("Wrong input format for RgbToRgbaToFlutter")?;
+
+        let interp =
+            Rgba { width: frame.interp.width, height: frame.interp.height, fps: frame.interp.fps };
+        let mut new_buffer = unsafe { self.context.get_uninit_cpu_buffer(interp.required_bytes()) };
+
+        frame.storage.as_slice(|src| {
+            new_buffer.as_mut_slice(|dst| {
+                // expand every RGB triple into an RGBA quadruple with full alpha
+                for (rgb, rgba) in src.chunks_exact(3).zip(dst.chunks_exact_mut(4)) {
+                    rgba[0] = rgb[0];
+                    rgba[1] = rgb[1];
+                    rgba[2] = rgb[2];
+                    rgba[3] = 255;
+                }
+            })
+        });
+
+        let new_frame = Frame { storage: new_buffer, interp };
+
+        Ok(Payload::from(new_frame))
+    }
+
+    fn get_caps(&self) -> Caps { self.input.get_caps() }
+}
diff --git a/src/nodes_cpu/row_noise_removal/generate_model.py b/src/nodes_cpu/row_noise_removal/generate_model.py
new file mode 100755
index 0000000..47e4f6d
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/generate_model.py
@@ -0,0 +1,395 @@
+#!/usr/bin/env python3
+
+import sys
+import numpy as np
+from scipy.optimize import curve_fit
+from dataclasses import dataclass
+import numba
+import argparse
+
+
+def eprint(*args):
+ print(*args, file=sys.stderr)
+
+
+np.set_printoptions(suppress=True)
+
+NUM_DARKCOLS = 8
+BLACK_LEVEL = 128
+
+
+@dataclass
+class ModelHalfWeights:
+ # [f32; 2 * NUM_DARKCOLS]
+ dark_col_mean_weights: np.ndarray
+ # Vec<[f32; 2]>, first pair for lag 1, then pair for lag 2, etc
+ green_diff_weights: np.ndarray
+ # Vec<[[f32; 2 * NUM_DARKCOLS]; 2]>, first for lag 0, then pair for lag 1, etc
+ dark_col_row_weights: np.ndarray
+ offset: float
+
+ def pack_weights(self) -> np.ndarray:
+ return np.concatenate(
+ [
+ np.ravel(self.green_diff_weights),
+ np.ravel(self.dark_col_row_weights),
+ np.ravel(self.dark_col_mean_weights),
+ [self.offset],
+ ]
+ )
+
+
+@dataclass
+class ModelWeights:
+ weights_even: ModelHalfWeights
+ weights_odd: ModelHalfWeights
+
+ def serialize(self) -> str:
+ import yaml
+
+ def ndarray_representer(dumper: yaml.Dumper, array: np.ndarray) -> yaml.Node:
+ if array.size > 0:
+ return dumper.represent_list(array.tolist())
+ else:
+ return dumper.represent_scalar("tag:yaml.org,2002:null", "")
+
+ yaml.add_representer(np.ndarray, ndarray_representer)
+ return yaml.dump(self)
+
+
+def fast_median(array, axis):
+ kth = array.shape[axis] // 2
+ array.partition(kth, axis=axis)
+ index = [slice(None)] * array.ndim
+ index[axis] = kth
+ return array[tuple(index)].copy()
+
+
+@dataclass
+class ModelParameters:
+ num_green_lags: int
+ num_dark_col_rows: int
+    has_dark_column: bool
+
+ def nparams(self) -> int:
+ # +1 for offset
+ return (
+ self._nparams_dark_col_mean()
+ + self._nparams_dark_col_rows()
+ + self._nparams_green_diffs()
+ + 1
+ )
+
+ def _nparams_dark_col_rows(self) -> int:
+ if self.num_dark_col_rows == 0:
+ return 0
+ return (self.num_dark_col_rows * 2 - 1) * 2 * NUM_DARKCOLS * 2
+
+ def _nparams_dark_col_mean(self) -> int:
+ if self.has_dark_column:
+ return 2 * NUM_DARKCOLS
+ else:
+ return 0
+
+ def _nparams_green_diffs(self) -> int:
+ return self.num_green_lags * 2
+
+ def initial_weights(self) -> list[float]:
+ # offset initialized with 0 works better
+ return [0.0] * (self.nparams() - 1) + [0.0]
+
+ def unpack_weights(self, weights: np.ndarray) -> ModelHalfWeights:
+ pos = 0
+
+ nparam = self._nparams_green_diffs()
+ green_diff_weights = weights[pos : pos + nparam].reshape((-1, 2))
+ pos += nparam
+
+ nparam = self._nparams_dark_col_rows()
+ dark_col_row_weights = weights[pos : pos + nparam].reshape(
+ (-1, 2, 2 * NUM_DARKCOLS)
+ )
+ pos += nparam
+
+ nparam = self._nparams_dark_col_mean()
+
+ dark_col_mean_weights = weights[pos : pos + nparam]
+ pos += nparam
+ offset = float(weights[pos])
+
+ return ModelHalfWeights(
+ green_diff_weights=green_diff_weights,
+ dark_col_row_weights=dark_col_row_weights,
+ dark_col_mean_weights=dark_col_mean_weights,
+ offset=offset,
+ )
+
+ # signature to make it work with scipy curve_fit
+ # given the hyperparameters, weights and input data x packed by pack_data, produce the row averages
+ def compute_fit(self, x, *weights) -> np.ndarray:
+ return x @ weights[:-1] + weights[-1]
+
+    # TODO(robin): maybe support a bayer pattern other than green at pixel 0,0?
+ def build_data(
+ self, darkframes: np.ndarray, darkframe_mean=None
+ ) -> tuple[np.ndarray, np.ndarray]:
+ eprint("calculating model inputs")
+ n_darkframes = darkframes.shape[0]
+ if darkframe_mean is None:
+ darkframe_mean = np.mean(darkframes, axis=0)
+ frame_height = darkframe_mean.shape[0]
+ frame_width = darkframe_mean.shape[1]
+
+ eprint("subtracting mean")
+
+ @numba.njit(parallel=True)
+ def _sub_mean_darkframe(darkframes):
+ for frame in numba.prange(n_darkframes):
+ darkframes[frame] += BLACK_LEVEL
+ darkframes[frame] = np.round(
+ darkframes[frame] - darkframe_mean, 0, darkframes[frame]
+ )
+
+ _sub_mean_darkframe(darkframes)
+
+ darkframes.shape = (-1, frame_width)
+
+ flat_darkframes = darkframes
+ del darkframes
+
+ eprint("calculating row means")
+ row_means = (
+ np.mean(flat_darkframes[:, NUM_DARKCOLS:-NUM_DARKCOLS], axis=1)
+ - BLACK_LEVEL
+ )
+
+ eprint("getting dark cols")
+ dark_col_rows = (
+ np.repeat(
+ np.hstack(
+ [
+ flat_darkframes[:, :NUM_DARKCOLS],
+ flat_darkframes[:, -NUM_DARKCOLS:],
+ ]
+ ).reshape((-1, 2 * NUM_DARKCOLS * 2)),
+ 2,
+ axis=0,
+ )
+ - BLACK_LEVEL
+ )
+
+ eprint("calculating green diffs")
+        # here we cheat a bit. np.roll would be nicer, but it is slow
+ # non numba version:
+ # for lag in range(1, max_lag + 1):
+ # diffs = []
+ # for frame in tqdm(range(n_darkframes)):
+ # df = flat_darkframes[frame * frame_height: (frame + 1) * frame_height]
+
+ # diff_even = fast_median(df[0:-max_lag:2,0::2] - df[0 + lag:(-max_lag + lag) or None:2,(lag + 0) % 2::2], axis=1)
+ # diff_odd = fast_median(df[1:-max_lag:2,1::2] - df[1 + lag:(-max_lag + lag) or None:2,(lag + 1) % 2::2], axis=1)
+
+ # diff = np.zeros(diff_even.size + diff_odd.size + max_lag)
+ # diff[:-max_lag:2] = diff_even
+ # diff[1:-max_lag:2] = diff_odd
+ # diffs.append(diff)
+
+ # diff = np.concatenate(diffs)
+ # print(diff)
+
+ # green_diffs.append(diff)
+
+ max_lag = self.num_green_lags
+
+ @numba.njit(parallel=True)
+ def _calculate_green_diffs():
+ green_diffs = []
+ for lag in range(1, max_lag + 1):
+ diff = np.zeros(frame_height * n_darkframes, dtype=np.int16)
+ for frame in numba.prange(n_darkframes):
+ df = flat_darkframes[
+ frame * frame_height : (frame + 1) * frame_height
+ ]
+
+ for row in range(frame_height - max_lag):
+ diff[frame_height * frame + row] = np.median(
+ df[row, (row % 2) :: 2]
+ - df[row + lag, (lag + row) % 2 :: 2]
+ )
+ green_diffs.append(diff)
+ return green_diffs
+
+ green_diffs = _calculate_green_diffs()
+
+ if len(green_diffs) > 0:
+ green_diffs = np.stack(green_diffs)
+
+ del flat_darkframes
+ dark_col_means = np.mean(
+ dark_col_rows[::2].reshape((-1, frame_height, 2 * NUM_DARKCOLS)), axis=1
+ )
+ dark_col_means = np.repeat(dark_col_means, frame_height, axis=0)
+ packed_data = self.pack_data(green_diffs, dark_col_rows, dark_col_means)
+
+ # some of the rows have invalid data, those are uncorrectable
+ num_uncorrectable = self._num_uncorrectable()
+
+ if num_uncorrectable > 0:
+            single_frame_mask = np.ones(frame_height, dtype=bool)
+ single_frame_mask[:num_uncorrectable] = 0
+ single_frame_mask[-num_uncorrectable:] = 0
+ mask = np.tile(single_frame_mask, n_darkframes)
+
+ data = packed_data[mask]
+ row_means = row_means[mask]
+ else:
+ data = packed_data
+
+ return (data, row_means)
+
+    # this returns data for all rows, even the uncorrectable ones; make sure to mask those
+ def pack_data(
+ self,
+ green_diffs: np.ndarray,
+ dark_col_rows: np.ndarray,
+ dark_col_means: np.ndarray,
+ ) -> np.ndarray:
+ data = []
+
+        # the green lags come first:
+        # the weight for lag -1, then lag 1, then lag -2, then lag 2, etc
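+        # e.g. for num_green_lags = 2 the first four columns of the packed data are the row
+        # medians for lags -1, +1, -2, +2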
+ for lag in range(0, self.num_green_lags):
+            # green_diffs[lag][row] is the median of (row - (row + lag + 1))
+            # the median of (row - (row - (lag + 1))) is obtained by negating the value stored
+            # lag + 1 rows above, i.e. by negating and rolling the array by lag + 1
+            # (the loop variable is 0-based, the actual lag is lag + 1)
+ data.append(-np.roll(green_diffs[lag], lag + 1, axis=0).reshape(-1, 1))
+ data.append(green_diffs[lag].reshape(-1, 1))
+
+ # again, first lag 0, then lag -1, then lag 1, then lag -2, then lag 2, etc
+ for lag in range(0, self.num_dark_col_rows):
+ if lag == 0:
+ data.append(dark_col_rows)
+ else:
+ # we interpret the lags for the dark cols as blocks of two rows,
+ # as we use both the respective even and the odd row for each row
+ data.append(np.roll(dark_col_rows, 2 * lag, axis=0))
+ data.append(np.roll(dark_col_rows, -2 * lag, axis=0))
+
+ if self.has_dark_column:
+ data.append(dark_col_means)
+
+ return np.hstack(data)
+
+ def fit_model(
+ self, darkframes, darkframe_mean=None, use_odd_even=True
+ ) -> ModelWeights:
+ x, row_means = self.build_data(darkframes, darkframe_mean)
+ del darkframes
+
+        def fit_single_model(x, row_means) -> np.ndarray:
+ p0 = self.initial_weights()
+ weights, _ = curve_fit(
+ lambda x, *weights: self.compute_fit(x, *weights),
+ x,
+ row_means,
+ p0=p0,
+ method="lm",
+ )
+ self.evaluate_model(row_means, weights, x)
+ return weights
+
+        if not use_odd_even:
+ eprint("creating combined even odd model")
+ weights = fit_single_model(x, row_means)
+ weights_even = weights_odd = weights
+ else:
+            # we cut off the uncorrectable rows from the top and the bottom
+            # ensure that we have the correct even/odd parity when going from x and row_means to weights_{even, odd}
+ parity = self._num_uncorrectable() % 2
+ eprint("creating even model:")
+ weights_even = fit_single_model(x[parity::2], row_means[parity::2])
+ eprint("creating odd model:")
+ weights_odd = fit_single_model(
+ x[(parity + 1) % 2 :: 2], row_means[(parity + 1) % 2 :: 2]
+ )
+
+ return ModelWeights(
+ weights_even=self.unpack_weights(weights_even),
+ weights_odd=self.unpack_weights(weights_odd),
+ )
+
+ def evaluate_model(self, row_means, weights, x):
+ eprint("evaluating model")
+ initial_residual = (np.sum(row_means**2) / row_means.shape[0]) ** 0.5
+ fitted_row_means = self.compute_fit(x, *weights)
+ fit_residual = (
+ np.sum((row_means - fitted_row_means) ** 2) / row_means.shape[0]
+ ) ** 0.5
+ eprint(
+ f"average quadratic row deviation before correction: {initial_residual}, after: {fit_residual}"
+ )
+
+ def _num_uncorrectable(self):
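+        # rows closer than num_green_lags to the frame border lack some green-diff inputs, rows
+        # closer than (num_dark_col_rows - 1) * 2 lack some of the lagged dark column rows, so
+        # those rows cannot be corrected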
+ return max(self.num_green_lags, (self.num_dark_col_rows - 1) * 2)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="generate a row noise removal model for the axiom recorder using a given "
+ )
+ parser.add_argument(
+ "darkframe_stack",
+ help="the darkframe stack as a folder of dng files",
+ )
+ parser.add_argument(
+ "-c", "--count", help="the number of darkframes to use for training the model"
+ )
+
+ parser.add_argument(
+ "--green_diff_lags",
+ help="the number of green lags to consider in the model."
+ "Set to 0 to disable green diffs",
+ default=0,
+ type=int,
+ )
+ parser.add_argument(
+ "--dark_column_rows",
+ help="the number of dark column rows to use (1=2, 2=6, 3=10, 4=14, ...). Set to 0 to disable dark columns",
+ default=0,
+ type=int,
+ )
+ parser.add_argument(
+ "--combined_model",
+ help="use the same model for odd and even rows",
+ default=False,
+ action="store_true"
+ )
+
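+    # example invocation (paths are illustrative); progress goes to stderr, the model to stdout:
+    #   ./generate_model.py /path/to/darkframe_dngs --green_diff_lags 3 --dark_column_rows 3 > model.yml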
+ args = parser.parse_args()
+
+ from pathlib import Path
+ import rawpy
+
+ eprint("reading darkframe stack")
+ dngs = list(Path(args.darkframe_stack).glob("*.dng"))
+ with rawpy.imread(str(dngs[0])) as raw:
+ h, w = raw.raw_image.shape
+ darkframes = np.empty((len(dngs), h, w), dtype=np.int16)
+ for i, dng in enumerate(dngs):
+ with rawpy.imread(str(dng)) as raw:
+ darkframes[i] = raw.raw_image
+
+ eprint("calculating darkframe mean")
+ mean = np.mean(darkframes, axis=0)
+
+ if args.green_diff_lags == 0 and args.dark_column_rows == 0:
+ eprint("both green lags and dark column rows disabled, cannot fit a model with zero parameters")
+ exit(1)
+
+ # TODO(robin): check dark column values, they are broken
+    # TODO(robin): why, with only dark cols, is there an asymmetry between even and odd?
+ model = ModelParameters(
+ num_green_lags=args.green_diff_lags, num_dark_col_rows=args.dark_column_rows, has_dark_column=False
+ )
+ weights = model.fit_model(darkframes=darkframes, darkframe_mean=mean, use_odd_even=not args.combined_model)
+ print(weights.serialize())
diff --git a/src/nodes_cpu/row_noise_removal/good.yml b/src/nodes_cpu/row_noise_removal/good.yml
new file mode 100644
index 0000000..b85f934
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/good.yml
@@ -0,0 +1,216 @@
+!!python/object:__main__.ModelWeights
+weights_even: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ - - - 0.00795359413164966
+ - 0.011072184756342546
+ - 0.009924272884070013
+ - 0.01939364144249242
+ - 0.00870470254833014
+ - 0.014525986982546823
+ - 0.011936463360971603
+ - 0.010217871746359707
+ - 0.012819701851280412
+ - 0.009785510520699384
+ - 0.005004240672390965
+ - 0.002787418121704323
+ - 0.009410269649359631
+ - 0.012159183639759043
+ - 0.011067635956486958
+ - 0.01410952821055491
+ - - 0.020965209123158527
+ - 0.011923491922033857
+ - 0.009564915535930496
+ - 0.01389934375134091
+ - 0.01290921167825828
+ - 0.0046863040434661305
+ - 0.018091398989817153
+ - 0.011737597289692512
+ - 0.01613179923409275
+ - 0.01584630351220505
+ - 0.002820566950344593
+ - 0.00742750646731183
+ - 0.013210073538576882
+ - 0.009335221688389864
+ - 0.011284802979043464
+ - 0.01154835831958488
+ - - - 0.004289520832288559
+ - 0.006622111223319236
+ - 0.005454355176477891
+ - 0.011474874761876244
+ - 0.005144595254898848
+ - 0.007847214191126526
+ - 0.006437223026053526
+ - 0.006183736322761139
+ - 0.008408893160162998
+ - 0.005651766583466354
+ - 0.0029725777293411176
+ - 0.0008430069064963861
+ - 0.005582700537371058
+ - 0.0070682871230009015
+ - 0.006455940161200216
+ - 0.0080486718499864
+ - - 0.015807618442557837
+ - 0.007796985235837638
+ - 0.006276849087804012
+ - 0.009304913478930577
+ - 0.008965023662631307
+ - 0.0033552422434854653
+ - 0.01266552944383183
+ - 0.007476278454814932
+ - 0.011063372381563156
+ - 0.010710698772370086
+ - 0.002058627132335233
+ - 0.005304933978342719
+ - 0.008967965633666935
+ - 0.005444870881265782
+ - 0.008258710148247457
+ - 0.008882758387283583
+ - - - -0.00041235242664525325
+ - 0.0007768358739423795
+ - 0.007313563659670423
+ - 0.014504690272273028
+ - 0.006946894339713028
+ - 0.011291731545137983
+ - 0.00966611356712981
+ - 0.007677386817029865
+ - 0.009642402974804062
+ - 0.006940827682159381
+ - 0.003722157337133242
+ - 0.002360917452502391
+ - 0.007436623328852728
+ - 0.009008781661345272
+ - 0.00044088888236949053
+ - -0.0006373315924448639
+ - - 0.0014568859210241539
+ - -0.002013260899794958
+ - 0.008380801285569069
+ - 0.011737702941565733
+ - 0.010919177655204492
+ - 0.0038699965463770795
+ - 0.014955051922733973
+ - 0.010503760934103419
+ - 0.013993035177093311
+ - 0.013479455907435926
+ - 0.0022387921132540466
+ - 0.006863225245535661
+ - 0.011423785198634804
+ - 0.0074650171212291595
+ - -0.00130340819842616
+ - 0.002120724649370982
+ green_diff_weights:
+ - - 0.07465293179013949
+ - 0.0038357314177002314
+ - - 0.21361846238609475
+ - 0.18241022767161214
+ - - 0.018253186283411594
+ - 0.07493661670108727
+ offset: 9.776528193544962e-05
+weights_odd: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ - - - 0.006556859960903388
+ - 0.009882222351375557
+ - 0.00836831947651911
+ - 0.01598469416811063
+ - 0.00786747566945905
+ - 0.012774667644994377
+ - 0.01074163305204318
+ - 0.009071749584779874
+ - 0.011238972666833791
+ - 0.008283112350793929
+ - 0.004784634962152135
+ - 0.002392855825764393
+ - 0.00868329276600574
+ - 0.010479104183005229
+ - 0.009962713193341584
+ - 0.012398601867861795
+ - - 0.021306351014648596
+ - 0.01309940689707896
+ - 0.01003986100880033
+ - 0.015004250918429076
+ - 0.015092682113167812
+ - 0.004712911389336813
+ - 0.018549193607494182
+ - 0.012559796326177363
+ - 0.01780189971671312
+ - 0.01716708988300039
+ - 0.0034578933217720466
+ - 0.008243010536561443
+ - 0.01439622877245579
+ - 0.01088496099420988
+ - 0.012930867815161014
+ - 0.012289534285792955
+ - - - -0.0005152122338434913
+ - -7.674086486282507e-05
+ - 0.007221703857029941
+ - 0.0144538818659539
+ - 0.00652951453905544
+ - 0.010813246399961871
+ - 0.008477570519399846
+ - 0.007788775237534545
+ - 0.010631587222578119
+ - 0.007289013459758929
+ - 0.0038497314316135725
+ - 0.0014624701980173061
+ - 0.007193364412178601
+ - 0.009376297046034463
+ - 0.0004486868689420815
+ - -3.4041320960742116e-06
+ - - 0.0015391927547393988
+ - -0.001738603947300928
+ - 0.008265368215994046
+ - 0.012658516626462179
+ - 0.012259230188782812
+ - 0.004242506017877248
+ - 0.01613983929781178
+ - 0.009952532254017162
+ - 0.01431627300647014
+ - 0.013513975878444153
+ - 0.0028849721197412572
+ - 0.006705233510664884
+ - 0.010823632403500752
+ - 0.00774316226663826
+ - -0.0013281131970664284
+ - 0.0018715475029902043
+ - - - 0.005201355392472196
+ - 0.006978111334487996
+ - 0.005681978451864566
+ - 0.012359647484303647
+ - 0.006031504760986652
+ - 0.009238924245997131
+ - 0.008377394914782796
+ - 0.0062666472612402605
+ - 0.007846654355679564
+ - 0.0058290983451487
+ - 0.0027568059864676205
+ - 0.0019240496913839702
+ - 0.006195265156885215
+ - 0.007139690428595864
+ - 0.007422311600920212
+ - 0.007981089216647025
+ - - 0.014761432978313569
+ - 0.007655708177178347
+ - 0.006973768230338902
+ - 0.009630625860986598
+ - 0.008257539442466507
+ - 0.003228656947926171
+ - 0.012874794728288862
+ - 0.009392747192421407
+ - 0.012159552765582156
+ - 0.011814556984827536
+ - 0.0019060820924957235
+ - 0.005682965768649453
+ - 0.010457171704652608
+ - 0.006040642790530012
+ - 0.007922355721704638
+ - 0.008917187305898947
+ green_diff_weights:
+ - - 0.1995943634556368
+ - 0.2133538696504943
+ - - 0.05675414445662803
+ - 0.08585459551713517
+ - - 0.21633530379837967
+ - 0.024437061133684476
+ offset: -0.00162025688164718
+
diff --git a/src/nodes_cpu/row_noise_removal/mean.yml b/src/nodes_cpu/row_noise_removal/mean.yml
new file mode 100644
index 0000000..7ab0027
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/mean.yml
@@ -0,0 +1,13 @@
+# this is a hand-crafted model, used only for sanity checking. It takes the per-row mean of the
+# dark columns, scales it by 0.6 and subtracts it. Proof that 0.6 is optimal:
+# https://github.com/apertus-open-source-cinema/misc-tools-utilities/commit/48de47b2a544dc32bbd5a8fd7701bb44a31ea850#diff-624053a553f49c0036b4d31282e58b2fR301
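+#
+# in terms of the weights below: each row has 2 * 8 = 16 dark column samples, each weighted with
+# 0.0375, and 16 * 0.0375 = 0.6, i.e. the correction is 0.6 times the mean of that row's dark
+# columns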
+
+weights_even:
+ dark_col_row_weights:
+ - - [0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375]
+ - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+weights_odd:
+ dark_col_row_weights:
+ - - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ - [0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375, 0.0375]
diff --git a/src/nodes_cpu/row_noise_removal/mod.rs b/src/nodes_cpu/row_noise_removal/mod.rs
new file mode 100644
index 0000000..3e54267
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/mod.rs
@@ -0,0 +1,248 @@
+use crate::pipeline_processing::{
+ node::InputProcessingNode,
+ parametrizable::{Parameterizable, Parameters, ParametersDescriptor},
+ payload::Payload,
+};
+use anyhow::{Context, Result};
+
+
+use crate::pipeline_processing::{
+ frame::{Frame, FrameInterpretation, Raw},
+ node::{Caps, NodeID, ProcessingNode, Request},
+ parametrizable::prelude::*,
+ processing_context::ProcessingContext,
+};
+use async_trait::async_trait;
+
+#[derive(serde::Deserialize)]
+pub struct PerHalfWeights {
+ #[serde(default)]
+ green_diff_weights: Vec<[f32; 2]>,
+
+ // index -> lags
+ // 0, 1, 2, 3, 4 -> 0, -1, 1, -2, 2
+ #[serde(default)]
+ dark_col_row_weights: Vec<[[f32; 2 * NUM_DARKCOLS]; 2]>,
+ #[serde(default)]
+ offset: f32,
+}
+
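+// a model file is a YAML document that deserializes into `RowNoiseRemovalModel`; a minimal,
+// hand-crafted example (cf. mean.yml in this directory, the 16 weights abbreviated here):
+//
+//     weights_even:
+//       dark_col_row_weights:
+//         - - [0.0375, ...]   # lag 0, dark columns of the even row
+//           - [0, ...]        # lag 0, dark columns of the odd row
+//     weights_odd:
+//       dark_col_row_weights:
+//         - - [0, ...]
+//           - [0.0375, ...]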
+#[derive(serde::Deserialize)]
+pub struct RowNoiseRemovalModel {
+ weights_odd: PerHalfWeights,
+ weights_even: PerHalfWeights,
+}
+
+impl PerHalfWeights {
+ fn num_green_lags(&self) -> usize { self.green_diff_weights.len() }
+
+ fn num_dark_cols(&self) -> usize { 1 + self.dark_col_row_weights.len() / 2 }
+}
+
+impl RowNoiseRemovalModel {
+ fn num_green_lags(&self) -> usize { self.weights_even.num_green_lags() }
+
+ fn num_dark_cols(&self) -> usize { self.weights_even.num_dark_cols() }
+
+ fn num_uncorrectable(&self) -> usize {
+ return self.num_green_lags().max((self.num_dark_cols() - 1) * 2);
+ }
+}
+
+pub struct RowNoiseRemoval {
+ model: RowNoiseRemovalModel,
+ input: InputProcessingNode,
+ context: ProcessingContext,
+ strip_dark_columns: bool,
+}
+
+impl Parameterizable for RowNoiseRemoval {
+ fn describe_parameters() -> ParametersDescriptor {
+ ParametersDescriptor::new()
+ .with("input", Mandatory(NodeInputParameter))
+ .with("strip-dark-columns", Optional(BoolParameter))
+ .with("model", WithDefault(StringParameter, StringValue("internal:good".to_owned())))
+ }
+
+ fn from_parameters(
+ mut parameters: Parameters,
+ _is_input_to: &[NodeID],
+ context: &ProcessingContext,
+    ) -> Result<Self> {
+ let model_path: String = parameters.take("model")?;
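+        // `model` is either one of the models bundled below (e.g. "internal:mean") or a path to a
+        // YAML file produced by row_noise_removal/generate_model.py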
+ let model_yml = match model_path.as_str() {
+ "internal:good" => include_str!("./good.yml").to_owned(),
+ "internal:only_dark" => include_str!("./only_dark.yml").to_owned(),
+ "internal:only_green" => include_str!("./only_green.yml").to_owned(),
+ "internal:mean" => include_str!("./mean.yml").to_owned(),
+ _ => std::fs::read_to_string(&model_path)
+ .with_context(|| format!("Failed to read model from {}", model_path))?,
+ };
+
+ Ok(Self {
+ input: parameters.take("input")?,
+ strip_dark_columns: parameters.take("strip-dark-columns")?,
+ context: context.clone(),
+ model: serde_yaml::from_str(&model_yml)?,
+ })
+ }
+}
+
+
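+// returns the column parity (0 or 1) at which the green samples of `row` sit for the given CFA layout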
+fn get_col_parity_for_row(interp: Raw, row: usize) -> usize {
+ let parity_for_even_row = if interp.cfa.red_in_first_col && interp.cfa.red_in_first_row {
+ 1
+ } else if !interp.cfa.red_in_first_col && !interp.cfa.red_in_first_row {
+ 1
+ } else {
+ 0
+ };
+
+ if row % 2 == 0 {
+ parity_for_even_row
+ } else {
+ 1 - parity_for_even_row
+ }
+}
+
+
+// per side
+const NUM_DARKCOLS: usize = 8;
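+// the black level the data sits at after darkframe subtraction (cf. darkframe_subtract.glsl,
+// which adds 128.0 to the raw values before subtracting the darkframe)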
+const BLACK_LEVEL: f32 = 128f32;
+
+#[async_trait]
+impl ProcessingNode for RowNoiseRemoval {
+    async fn pull(&self, request: Request) -> Result<Payload> {
+ let frame = self.input.pull(request).await?;
+        let frame = self.context.ensure_cpu_buffer::<Raw>(&frame).unwrap();
+ let interp = frame.interp;
+ let width = interp.width as usize;
+ let height = interp.height as usize;
+ assert_eq!(frame.interp.bit_depth, 16);
+
+ let model = &self.model;
+
+ let mut green_diffs =
+ vec![vec![0f32; (height - model.num_uncorrectable())]; model.num_green_lags()];
+
+        frame.storage.as_slice(|frame| {
+ let frame: &[u16] = bytemuck::cast_slice(frame);
+
+ for row in 0..(height - model.num_uncorrectable()) {
+ for lag in 0..model.num_green_lags() {
+ let lag = lag + 1;
+ let mut diffs = Vec::new();
+
+                    // an example bayer pattern as a visual aid:
+ // | B | G | B | G |
+ // | G | R | G | R |
+ // | B | G | B | G |
+ // | G | R | G | R |
+
+                    // this is an offset, so that `col + col_parity` in the loop below are the
+                    // indices of the green values for `row`
+ let col_parity = get_col_parity_for_row(interp, row);
+                    // this is an offset, so that `col + col_lag_parity` in the loop below are the
+ // indices of the green values for `row + lag`
+ let col_lag_parity = get_col_parity_for_row(interp, row + lag);
+
+ for col in (0..width).step_by(2) {
+ diffs.push(
+ frame[row * width + col + col_parity] as i32
+ - frame[(row + lag) * width + col + col_lag_parity] as i32,
+ );
+ }
+ let middle = diffs.len() / 2;
+ let (_, median, _) = diffs.select_nth_unstable(middle);
+ green_diffs[lag - 1][row] = *median as f32;
+ }
+ }
+ });
+
+
+ let strip_offset = if self.strip_dark_columns { NUM_DARKCOLS } else { 0 };
+ let output_width = (interp.width - 2 * (strip_offset as u64)) as usize;
+ let output_interp = Raw { width: output_width as u64, ..interp };
+ let mut row_noise_removed =
+ unsafe { self.context.get_uninit_cpu_buffer(output_interp.required_bytes()) };
+
+ frame.storage.as_slice(|src| {
+ let src: &[u16] = bytemuck::cast_slice(src);
+ row_noise_removed.as_mut_slice(|dst| {
+ let dst: &mut [u16] = bytemuck::cast_slice_mut(dst);
+ for row in model.num_uncorrectable()..(height - model.num_uncorrectable()) {
+ let weights =
+ if row % 2 == 0 { &model.weights_even } else { &model.weights_odd };
+ let mut offset = weights.offset;
+
+ for (lag, lag_weights) in weights.green_diff_weights.iter().enumerate() {
+                    // the green lags are 1-based: index lag == 0 corresponds to an actual lag of one
+ offset -= green_diffs[lag][row - lag - 1] * lag_weights[0];
+ offset += green_diffs[lag][row] * lag_weights[1];
+ }
+
+
+ for (i, [weights_even, weights_odd]) in
+ weights.dark_col_row_weights.iter().enumerate()
+ {
+ // 0, 1, 2, 3, 4 -> 0, -1, 1, -2, 2
+ let i = i as isize;
+                        let lag = if i % 2 == 0 { i / 2 } else { -(i / 2 + 1) };
+ let even_row = row - (row % 2);
+
+ for col in 0..NUM_DARKCOLS {
+ offset += weights_even[col]
+ * (src[(even_row as isize + 2 * lag) as usize * width + col]
+ as f32
+ - BLACK_LEVEL);
+ offset += weights_even[col + NUM_DARKCOLS]
+ * (src[(even_row as isize + 2 * lag) as usize * width
+ + (width - NUM_DARKCOLS)
+ + col] as f32
+ - BLACK_LEVEL);
+ }
+
+ for col in 0..NUM_DARKCOLS {
+ offset += weights_odd[col]
+ * (src[(even_row as isize + 1 + 2 * lag) as usize * width + col]
+ as f32
+ - BLACK_LEVEL);
+ offset += weights_odd[col + NUM_DARKCOLS]
+ * (src[(even_row as isize + 1 + 2 * lag) as usize * width
+ + (width - NUM_DARKCOLS)
+ + col] as f32
+ - BLACK_LEVEL);
+ }
+ }
+
+ for col in 0..output_width {
+ dst[row * output_width + col] =
+ (src[row * width + col + strip_offset] as f32 - offset) as u16;
+ }
+ }
+
+
+ // fill the uncorrectable rows with the original pixel data
+ // TODO(robin): maybe fall back to a simpler model without any uncorrectable
+ // rows?
+ for row in 0..model.num_uncorrectable() {
+ for col in 0..output_width {
+ dst[row * output_width + col] = src[row * width + col + strip_offset];
+ }
+ }
+
+ for row in (height - model.num_uncorrectable())..height {
+ for col in 0..output_width {
+ dst[row * output_width + col] = src[row * width + col + strip_offset];
+ }
+ }
+ })
+ });
+
+ Ok(Payload::from(Frame { storage: row_noise_removed, interp: output_interp }))
+ }
+
+ fn get_caps(&self) -> Caps { self.input.get_caps() }
+}
diff --git a/src/nodes_cpu/row_noise_removal/only_dark.yml b/src/nodes_cpu/row_noise_removal/only_dark.yml
new file mode 100644
index 0000000..3f6bf8a
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/only_dark.yml
@@ -0,0 +1,204 @@
+!!python/object:__main__.ModelWeights
+weights_even: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ - - - 0.01427063623153896
+ - 0.020726888193669295
+ - 0.018231598192991177
+ - 0.03598546701281998
+ - 0.016810504039882785
+ - 0.021939640649473388
+ - 0.020700098076264477
+ - 0.01776905636969181
+ - 0.026014481531077077
+ - 0.019617473221962004
+ - 0.007606831314222611
+ - 0.004596871359072187
+ - 0.018058180223373577
+ - 0.024601721519986065
+ - 0.01993305137090564
+ - 0.025473523522684713
+ - - 0.0440619717486404
+ - 0.02804324135025998
+ - 0.018511011358386818
+ - 0.02756190137664548
+ - 0.025148455871132017
+ - 0.010751002658578559
+ - 0.03886053372113459
+ - 0.022593757937302898
+ - 0.03436867818548011
+ - 0.029874628063657083
+ - 0.00581091632032547
+ - 0.015089337534664678
+ - 0.023833826644603765
+ - 0.017033142857128206
+ - 0.02179251639591687
+ - 0.023487348081671272
+ - - - -0.00019916773495421797
+ - 0.00208043727258367
+ - 0.0047384189317228875
+ - 0.007505575274382732
+ - 0.004655702188345737
+ - 0.008008305092195035
+ - 0.005612533677667672
+ - 0.00509544153524583
+ - 0.0048064605497048045
+ - 0.0024590487576589574
+ - 0.003911115173633854
+ - 0.0011168029842131752
+ - 0.004623479019813867
+ - 0.0038293841804242956
+ - 0.00014589294153960895
+ - 0.0017309787155625489
+ - - -0.0007338663283889298
+ - -0.0036425978290927584
+ - 0.0024204966002488596
+ - 0.0031434610283545203
+ - 0.004883412312836981
+ - 0.0007263209811888224
+ - 0.0028362723606369924
+ - 0.0028070921614808918
+ - 0.0024768169260683754
+ - 0.005226792024624831
+ - 0.0009329199067020501
+ - 0.001487077723986059
+ - 0.004357150955764686
+ - 0.0029634330641006477
+ - 0.0009438981777479477
+ - -4.763490389979085e-05
+ - - - -0.0003458488621602107
+ - 0.00031952626423962485
+ - 0.003188695359172655
+ - 0.006257881828591613
+ - 0.0027725934917696786
+ - 0.008057994299212879
+ - 0.006429948356408203
+ - 0.003777808116421864
+ - 0.0030139155166347634
+ - 0.001797669783208213
+ - 0.0023877838458169847
+ - 0.001824939014827577
+ - 0.002652586202122326
+ - 0.003191193397409811
+ - 0.0004626788006136306
+ - -0.00034401509142790114
+ - - -5.141495506804325e-05
+ - -0.002287577422119696
+ - 0.003468158635853343
+ - 0.0031445725354066556
+ - 0.003211362217290363
+ - 0.00033580471033606936
+ - 0.0038718968872296445
+ - 0.003848487414563351
+ - 0.003705991640241818
+ - 0.006266060524740595
+ - 0.0003461378547975874
+ - 0.002712295371281167
+ - 0.006224877950807533
+ - 0.003517702040470045
+ - 0.00011055034006066084
+ - 0.0023272830022866117
+ green_diff_weights:
+ offset: -9.63783472961455e-05
+weights_odd: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ - - - 0.007554762774669693
+ - 0.014857510496144819
+ - 0.010921660825633695
+ - 0.023782770547857844
+ - 0.011987838254678971
+ - 0.018235828260517402
+ - 0.014784524361535684
+ - 0.01362942610556369
+ - 0.017758010214843054
+ - 0.014636194892241017
+ - 0.0056011157503628424
+ - 0.0037067705276759783
+ - 0.014554211588141788
+ - 0.018654640819716704
+ - 0.015120401921271622
+ - 0.02041669600846488
+ - - 0.05272814988427591
+ - 0.0398138175853199
+ - 0.02383788606927056
+ - 0.03838301697113201
+ - 0.038492548669069
+ - 0.013332934344603049
+ - 0.04681000433301662
+ - 0.032467743946852624
+ - 0.04848562123351527
+ - 0.042557441716158675
+ - 0.010246319045024157
+ - 0.022821850783738707
+ - 0.03516636966913072
+ - 0.0292789937332532
+ - 0.03306614117017175
+ - 0.0322710433948809
+ - - - -0.0002785054580308522
+ - 0.0014515266510817843
+ - 0.00491718785197455
+ - 0.006964999807405011
+ - 0.004985202701335723
+ - 0.00949415749676426
+ - 0.005392188832693768
+ - 0.004857076815007034
+ - 0.004589682782657536
+ - 0.0027040920973336264
+ - 0.004491203657730007
+ - 0.0014202575165478811
+ - 0.004959766689720979
+ - 0.003503704990595909
+ - 0.00029013909868036447
+ - 0.0017356352982861186
+ - - -0.000806661747994489
+ - -0.0031197424440435094
+ - 0.002561945384910218
+ - 0.004135216911878582
+ - 0.006932643975074619
+ - 0.0004744160574888187
+ - 0.002198176599217834
+ - 0.004028067733769825
+ - 0.0029276686861227585
+ - 0.005991508059403796
+ - 0.0012913492002779858
+ - 0.0022662129848185497
+ - 0.005462054736318753
+ - 0.004523250314041137
+ - 0.0009748332327635684
+ - 2.9802656306821434e-05
+ - - - -0.0002849885672407741
+ - -0.0003774829958385583
+ - 0.0022411390204828165
+ - 0.004875918533036685
+ - 0.002868039857524666
+ - 0.009957990471875842
+ - 0.006956109819959009
+ - 0.003833875176334014
+ - 0.0015939266022263977
+ - 0.0021584535763991834
+ - 0.002963302550015843
+ - 0.0021785867385929903
+ - 0.003451261778410779
+ - 0.0038442191601356156
+ - 0.00042658715835023687
+ - 0.0003076585872251006
+ - - -0.0011006144545319136
+ - -0.002314728068297064
+ - 0.0031407800485580683
+ - 0.004375079751551236
+ - 0.004646897417617314
+ - 0.000588569756929701
+ - 0.003613821454858875
+ - 0.004483108213012488
+ - 0.004507162953462568
+ - 0.007983366898058984
+ - 0.00045340233890280046
+ - 0.0032893601994512522
+ - 0.007921939793923522
+ - 0.004462309031833437
+ - 0.0011681721675813753
+ - 0.00237342042041449
+ green_diff_weights:
+ offset: -0.0008359085587901745
+
diff --git a/src/nodes_cpu/row_noise_removal/only_green.yml b/src/nodes_cpu/row_noise_removal/only_green.yml
new file mode 100644
index 0000000..0ace9b7
--- /dev/null
+++ b/src/nodes_cpu/row_noise_removal/only_green.yml
@@ -0,0 +1,24 @@
+!!python/object:__main__.ModelWeights
+weights_even: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ green_diff_weights:
+ - - -0.17836376680291102
+ - -0.4105501952617383
+ - - 0.43629936409448755
+ - 0.4204623906920429
+ - - 0.19277238810231043
+ - -0.16900017632955364
+ offset: -0.0010874289591121151
+weights_odd: !!python/object:__main__.ModelHalfWeights
+ dark_col_mean_weights:
+ dark_col_row_weights:
+ green_diff_weights:
+ - - 0.4879804645339848
+ - 0.43341050056294045
+ - - -0.17254326519042207
+ - -0.18052946235127315
+ - - 0.4236788587840668
+ - 0.23795484501101222
+ offset: -0.002760103438085751
+
diff --git a/src/nodes_gpu/calibrate.glsl b/src/nodes_gpu/darkframe_subtract.glsl
similarity index 85%
rename from src/nodes_gpu/calibrate.glsl
rename to src/nodes_gpu/darkframe_subtract.glsl
index 027a104..58b4184 100644
--- a/src/nodes_gpu/calibrate.glsl
+++ b/src/nodes_gpu/darkframe_subtract.glsl
@@ -14,7 +14,7 @@ layout(push_constant) uniform PushConstantData {
layout(set = 0, binding = 0) buffer readonly Source { uint8_t data[]; } source;
layout(set = 0, binding = 1) buffer writeonly Sink { uint8_t data[]; } sink;
-layout(set = 0, binding = 2) uniform sampler2D lut_sampler;
+layout(set = 0, binding = 2) uniform sampler2D darkframe_sampler;
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
@@ -28,8 +28,8 @@ void main() {
float first_value = float((a << 4) | (b >> 4)) + 128.0;
float second_value = float(((b << 8) & 0xf00) | c) + 128.0;
- float corr_first_v = texelFetch(lut_sampler, pos * ivec2(2, 1), 0).r;
- float corr_second_v = texelFetch(lut_sampler, pos * ivec2(2, 1) + ivec2(1, 0), 0).r;
+ float corr_first_v = texelFetch(darkframe_sampler, pos * ivec2(2, 1), 0).r;
+ float corr_second_v = texelFetch(darkframe_sampler, pos * ivec2(2, 1) + ivec2(1, 0), 0).r;
uint corr_first = uint(round(first_value - corr_first_v));
uint corr_second = uint(round(second_value - corr_second_v));
diff --git a/src/nodes_gpu/calibrate.rs b/src/nodes_gpu/darkframe_subtract.rs
similarity index 95%
rename from src/nodes_gpu/calibrate.rs
rename to src/nodes_gpu/darkframe_subtract.rs
index c4cf6bf..e563965 100644
--- a/src/nodes_gpu/calibrate.rs
+++ b/src/nodes_gpu/darkframe_subtract.rs
@@ -27,11 +27,11 @@ use vulkano::{
mod compute_shader {
vulkano_shaders::shader! {
ty: "compute",
- path: "src/nodes_gpu/calibrate.glsl"
+ path: "src/nodes_gpu/darkframe_subtract.glsl"
}
}
-pub struct Calibrate {
+pub struct DarkframeSubtract {
device: Arc,
pipeline: Arc,
queue: Arc,
@@ -40,7 +40,7 @@ pub struct Calibrate {
darkframe_sampler: Arc,
}
-impl Parameterizable for Calibrate {
+impl Parameterizable for DarkframeSubtract {
fn describe_parameters() -> ParametersDescriptor {
ParametersDescriptor::new()
.with("input", Mandatory(NodeInputParameter))
@@ -99,7 +99,7 @@ impl Parameterizable for Calibrate {
)
.unwrap();
- Ok(Calibrate {
+ Ok(DarkframeSubtract {
device,
pipeline,
queue,
@@ -111,12 +111,12 @@ impl Parameterizable for Calibrate {
}
#[async_trait]
-impl ProcessingNode for Calibrate {
+impl ProcessingNode for DarkframeSubtract {
    async fn pull(&self, request: Request) -> Result<Payload> {
let input = self.input.pull(request).await?;
        let (frame, fut) = ensure_gpu_buffer::<Raw>(&input, self.queue.clone())
- .context("Wrong input format for Calibrate")?;
+ .context("Wrong input format for DarkframeSubtract")?;
let sink_buffer = DeviceLocalBuffer::<[u8]>::array(
self.device.clone(),
diff --git a/src/nodes_gpu/mod.rs b/src/nodes_gpu/mod.rs
index e9f4f9a..bcbf2c3 100644
--- a/src/nodes_gpu/mod.rs
+++ b/src/nodes_gpu/mod.rs
@@ -1,6 +1,6 @@
pub mod bitdepth_convert;
-pub mod calibrate;
pub mod color_voodoo;
+pub mod darkframe_subtract;
pub mod debayer;
pub mod debayer_resolution_loss;
pub mod lut_3d;
diff --git a/src/nodes_io/writer_cinema_dng.rs b/src/nodes_io/writer_cinema_dng.rs
index d17ef10..6892ca5 100644
--- a/src/nodes_io/writer_cinema_dng.rs
+++ b/src/nodes_io/writer_cinema_dng.rs
@@ -18,7 +18,7 @@ use dng::{
};
use std::{
fs,
- fs::{create_dir, File},
+ fs::{create_dir, remove_dir_all, File},
io::Write,
path::PathBuf,
str::FromStr,
@@ -45,6 +45,7 @@ impl Parameterizable for CinemaDngWriter {
.with("priority", Optional(U8()))
.with("number-of-frames", Optional(NaturalWithZero()))
.with("dcp-yaml", Optional(StringParameter))
+ .with("exists-ok?", Optional(Bool()))
}
fn from_parameters(
@@ -70,6 +71,10 @@ impl Parameterizable for CinemaDngWriter {
base_ifd.insert_from_other(dcp_ifd);
let filename = parameters.take("path")?;
+ if parameters.take("exists-ok?")? {
+            // we don't care if this fails
+ let _ = remove_dir_all(&filename);
+ }
create_dir(&filename).context("Error while creating target directory")?;
Ok(Self {
diff --git a/src/pipeline_processing/frame.rs b/src/pipeline_processing/frame.rs
index 9b69d1c..e300d56 100644
--- a/src/pipeline_processing/frame.rs
+++ b/src/pipeline_processing/frame.rs
@@ -15,6 +15,9 @@ pub trait FrameInterpretation: ToAny {
fn fps(&self) -> Option;
}
+
+// TODO(robin): add some way to index frames in 2d, by having a slice tied to the interp
+
/// The main data structure for transferring and representing single raw frames
/// of a video stream
pub struct Frame {
@@ -34,6 +37,7 @@ impl CfaDescriptor {
}
}
+// TODO(robin): this needs black level!!!
#[derive(Clone, Copy, Debug)]
pub struct Raw {
pub width: u64,
@@ -45,7 +49,7 @@ pub struct Raw {
impl FrameInterpretation for Raw {
fn required_bytes(&self) -> usize {
- self.width as usize * self.height as usize * self.bit_depth as usize / 8
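+        // round up so bit depths that are not a multiple of 8 still get enough bytes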
+ (self.width as usize * self.height as usize * self.bit_depth as usize + 7) / 8
}
fn width(&self) -> u64 { self.width }
fn height(&self) -> u64 { self.height }