Skip to content

Commit

Permalink
Remove unused error from ramp data
Browse files Browse the repository at this point in the history
  • Loading branch information
melanieclarke committed Jan 29, 2025
1 parent c9a45dd commit 3c24bdd
Show file tree
Hide file tree
Showing 15 changed files with 38 additions and 164 deletions.
14 changes: 2 additions & 12 deletions src/stcal/dark_current/dark_class.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ def __init__(self, dims=None, dark_model=None):
during the dark current step. This argument is only used if the
'dark_model' argument is None. If a dark model is not available
from which to create a DarkData class, but the dimensions of the
data array are known, then 'dims' is used (the arrays data, groupdq,
and err are assumed to have the same dimension).
data array are known, then 'dims' is used (the arrays data
and groupdq are assumed to have the same dimension).
dark_model : data model, optional
Expand All @@ -39,10 +39,8 @@ def __init__(self, dims=None, dark_model=None):
if dark_model is not None:
if isinstance(dark_model.data, u.Quantity):
self.data = dark_model.data.value
self.err = dark_model.err.value
else:
self.data = dark_model.data
self.err = dark_model.err
self.groupdq = dark_model.dq

self.exp_nframes = dark_model.meta.exposure.nframes
Expand All @@ -52,7 +50,6 @@ def __init__(self, dims=None, dark_model=None):
elif dims is not None:
self.data = np.zeros(dims, dtype=np.float32)
self.groupdq = np.zeros(dims, dtype=np.uint32)
self.err = np.zeros(dims, dtype=np.float32)

self.exp_nframes = None
self.exp_ngroups = None
Expand All @@ -61,7 +58,6 @@ def __init__(self, dims=None, dark_model=None):
else:
self.data = None
self.groupdq = None
self.err = None

self.exp_nframes = None
self.exp_ngroups = None
Expand Down Expand Up @@ -92,11 +88,6 @@ def __init__(self, science_model=None):
self.groupdq = science_model.groupdq
self.pixeldq = science_model.pixeldq

if isinstance(science_model.err, u.Quantity):
self.err = science_model.err.value
else:
self.err = science_model.err

self.exp_nframes = science_model.meta.exposure.nframes
self.exp_groupgap = science_model.meta.exposure.groupgap
try: # JWST only
Expand All @@ -109,7 +100,6 @@ def __init__(self, science_model=None):
self.data = None
self.groupdq = None
self.pixeldq = None
self.err = None

self.exp_nframes = None
self.exp_groupgap = None
Expand Down
18 changes: 5 additions & 13 deletions src/stcal/dark_current/dark_sub.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,16 +214,12 @@ def average_dark_frames_3d(dark_data, ngroups, nframes, groupgap):
if nframes == 1:
log.debug("copy dark frame %d", start)
avg_dark.data[group] = dark_data.data[start]
avg_dark.err[group] = dark_data.err[start]

# Otherwise average nframes into a new group: take the mean of
# the SCI arrays and the quadratic sum of the ERR arrays.
# the SCI arrays
else:
log.debug("average dark frames %d to %d", start + 1, end)
avg_dark.data[group] = dark_data.data[start:end].mean(axis=0)
avg_dark.err[group] = np.sqrt(np.add.reduce(dark_data.err[start:end] ** 2, axis=0)) / (
end - start
)

# Skip over unused frames
start = end + groupgap
Expand Down Expand Up @@ -298,16 +294,12 @@ def average_dark_frames_4d(dark_data, nints, ngroups, nframes, groupgap):
if nframes == 1:
log.debug("copy dark frame %d", start)
avg_dark.data[it, group] = dark_data.data[it, start]
avg_dark.err[it, group] = dark_data.err[it, start]

# Otherwise average nframes into a new group: take the mean of
# the SCI arrays and the quadratic sum of the ERR arrays.
# the SCI arrays
else:
log.debug("average dark frames %d to %d", start + 1, end)
avg_dark.data[it, group] = dark_data.data[it, start:end].mean(axis=0)
avg_dark.err[it, group] = np.sqrt(
np.add.reduce(dark_data.err[it, start:end] ** 2, axis=0)
) / (end - start)

# Skip over unused frames
start = end + groupgap
Expand All @@ -322,9 +314,9 @@ def average_dark_frames_4d(dark_data, nints, ngroups, nframes, groupgap):

def subtract_dark(science_data, dark_data):
"""
Subtracts dark current data from science arrays, combines
error arrays in quadrature, and updates data quality array based on
DQ flags in the dark arrays.
Subtracts dark current data from science arrays.
Also updates data quality array based on DQ flags in the dark arrays.
Parameters
----------
Expand Down
14 changes: 4 additions & 10 deletions src/stcal/jump/jump.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ def detect_jumps(
indata,
gdq,
pdq,
inerr,
gain_2d,
readnoise_2d,
rejection_thresh,
Expand Down Expand Up @@ -69,12 +68,12 @@ def detect_jumps(
turn.
Note that the detection methods are currently set up on the assumption
that the input science and error data arrays will be in units of
that the input science data array will be in units of
electrons, hence this routine scales that input array by the detector
gain. The methods assume that the read noise values will be in units
of DN.
The gain is applied to the science data and error arrays using the
The gain is applied to the science data array using the
appropriate instrument- and detector-dependent values for each pixel of an
image. Also, a 2-dimensional read noise array with appropriate values for
each pixel is passed to the detection methods.
Expand All @@ -93,9 +92,6 @@ def detect_jumps(
pdq : int, 2D array
pixel dq array
inerr : float, 4D array
error array
gain_2d : float, 2D array
gain for all pixels
Expand Down Expand Up @@ -247,10 +243,9 @@ def detect_jumps(
pdq[wh_g] = np.bitwise_or(pdq[wh_g], dqflags["NO_GAIN_VALUE"])
pdq[wh_g] = np.bitwise_or(pdq[wh_g], dqflags["DO_NOT_USE"])

# Apply gain to the SCI, ERR, and readnoise arrays so they're in units
# Apply gain to the SCI and readnoise arrays so they're in units
# of electrons
data = indata * gain_2d
err = inerr * gain_2d
readnoise_2d *= gain_2d
# also apply to the after_jump thresholds
after_jump_flag_e1 = after_jump_flag_dn1 * np.nanmedian(gain_2d)
Expand Down Expand Up @@ -474,10 +469,9 @@ def detect_jumps(
elapsed = time.time() - start
log.info("Total elapsed time = %g sec", elapsed)

# Back out the applied gain to the SCI, ERR, and readnoise arrays so they're
# Back out the applied gain to the SCI and readnoise arrays so they're
# back in units of DN
data /= gain_2d
err /= gain_2d
readnoise_2d /= gain_2d
# Return the updated data quality arrays
return gdq, pdq, total_primary_crs, number_extended_events, stddev
Expand Down
26 changes: 2 additions & 24 deletions src/stcal/ramp_fitting/gls_fit.py
Original file line number Diff line number Diff line change
Expand Up @@ -465,11 +465,11 @@ def slice_ramp_data(ramp_data, start_row, nrows):

# Slice data by row
data = ramp_data.data[:, :, start_row : start_row + nrows, :].copy()
err = ramp_data.err[:, :, start_row : start_row + nrows, :].copy()
groupdq = ramp_data.groupdq[:, :, start_row : start_row + nrows, :].copy()
pixeldq = ramp_data.pixeldq[start_row : start_row + nrows, :].copy()
average_dark_current = ramp_data.average_dark_current[start_row : start_row + nrows, :].copy()

Check warning on line 470 in src/stcal/ramp_fitting/gls_fit.py

View check run for this annotation

Codecov / codecov/patch

src/stcal/ramp_fitting/gls_fit.py#L470

Added line #L470 was not covered by tests

ramp_data_slice.set_arrays(data, err, groupdq, pixeldq)
ramp_data_slice.set_arrays(data, groupdq, pixeldq, average_dark_current)

Check warning on line 472 in src/stcal/ramp_fitting/gls_fit.py

View check run for this annotation

Codecov / codecov/patch

src/stcal/ramp_fitting/gls_fit.py#L472

Added line #L472 was not covered by tests

# Carry over meta data.
ramp_data_slice.set_meta(
Expand Down Expand Up @@ -564,18 +564,13 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt):
if ngroups == 1:
med_rates = utils.compute_median_rates(ramp_data)

# We'll propagate error estimates from previous steps to the
# current step by using the variance.
input_var = ramp_data.err**2

# Convert the data section from DN to electrons.
data *= gain_2d

for num_int in range(number_ints):
ramp_data.current_integ = num_int
gdq_cube = gdq[num_int, :, :, :]
data_cube = data[num_int, :, :, :]
input_var_sect = input_var[num_int, :, :, :]

if save_opt:
first_group[:, :] = data[num_int, 0, :, :].copy()
Expand All @@ -590,7 +585,6 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt):
) = determine_slope(
ramp_data,
data_cube,
input_var_sect,
gdq_cube,
readnoise_2d,
gain_2d,
Expand Down Expand Up @@ -767,7 +761,6 @@ def create_opt_res(save_opt, dims, max_num_cr):
def determine_slope(
ramp_data,
data_sect,
input_var_sect,
gdq_sect,
readnoise_sect,
gain_sect,
Expand Down Expand Up @@ -876,9 +869,6 @@ def determine_slope(
nx is the number of pixels in the X (more rapidly varying)
direction. The units should be electrons.
input_var_sect : 3-D ndarray, shape (ngroups, ny, nx)
The square of the input ERR array, matching data_sect.
gdq_sect : 3-D ndarray, shape (ngroups, ny, nx)
The group data quality array. This may be a subarray, matching
data_sect.
Expand Down Expand Up @@ -941,7 +931,6 @@ def determine_slope(
return determine_slope_one_group(
ramp_data,
data_sect,
input_var_sect,
gdq_sect,
readnoise_sect,
gain_sect,
Expand Down Expand Up @@ -971,7 +960,6 @@ def determine_slope(
while not done:
(intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) = compute_slope(
data_sect,
input_var_sect,
gdq_sect,
readnoise_sect,
gain_sect,
Expand Down Expand Up @@ -1016,7 +1004,6 @@ def determine_slope(
def determine_slope_one_group(
ramp_data,
data_sect,
input_var_sect,
gdq_sect,
readnoise_sect,
gain_sect,
Expand All @@ -1043,9 +1030,6 @@ def determine_slope_one_group(
nx is the number of pixels in the X (more rapidly varying)
direction. The units should be electrons.
input_var_sect : 3-D ndarray, shape (ngroups, ny, nx)
The square of the input ERR array, matching data_sect.
gdq_sect : 3-D ndarray, shape (ngroups, ny, nx)
The group data quality array. This may be a subarray, matching
data_sect.
Expand Down Expand Up @@ -1235,7 +1219,6 @@ def positive_fit(current_fit):

def compute_slope(
data_sect,
input_var_sect,
gdq_sect,
readnoise_sect,
gain_sect,
Expand All @@ -1261,9 +1244,6 @@ def compute_slope(
The ramp data for one of the integrations in an exposure. This
may be a subarray in detector coordinates, but covering all groups.
input_var_sect : 3-D ndarray, shape (ngroups, ny, nx)
The square of the input ERR array, matching data_sect.
gdq_sect : 3-D ndarray; shape (ngroups, ny, nx)
The group data quality array. This may be a subarray, matching
data_sect.
Expand Down Expand Up @@ -1406,7 +1386,6 @@ def compute_slope(
# ramp_data will be a ramp with a 1-D array of pixels copied out
# of data_sect.
ramp_data = np.empty((ngroups, nz), dtype=data_sect.dtype)
input_var_data = np.empty((ngroups, nz), dtype=data_sect.dtype)
prev_fit_data = np.empty((ngroups, nz), dtype=prev_fit.dtype)
prev_slope_data = np.empty(nz, dtype=prev_slope_sect.dtype)
prev_slope_data[:] = prev_slope_sect[ncr_mask]
Expand All @@ -1423,7 +1402,6 @@ def compute_slope(
saturated_data = np.empty((ngroups, nz), dtype=prev_fit.dtype)
for k in range(ngroups):
ramp_data[k] = data_sect[k][ncr_mask]
input_var_data[k] = input_var_sect[k][ncr_mask]
prev_fit_data[k] = prev_fit[k][ncr_mask]
cr_flagged_2d[k] = cr_flagged[k][ncr_mask]
# This is for clobbering saturated pixels.
Expand Down
2 changes: 2 additions & 0 deletions src/stcal/ramp_fitting/likely_algo_classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,8 @@ def calc_bias(self, countrates, sig, cvec, da=1e-7):
Bias of the best-fit count rate from using cvec plus the observed
resultants to estimate the covariance matrix.
"""
raise NotImplementedError('Bias calculations are not implemented.')

Check warning on line 281 in src/stcal/ramp_fitting/likely_algo_classes.py

View check run for this annotation

Codecov / codecov/patch

src/stcal/ramp_fitting/likely_algo_classes.py#L281

Added line #L281 was not covered by tests

alpha = countrates[np.newaxis, :] * self.alpha_phnoise[:, np.newaxis]
alpha += sig**2 * self.alpha_readnoise[:, np.newaxis]
beta = countrates[np.newaxis, :] * self.beta_phnoise[:, np.newaxis]
Expand Down
14 changes: 2 additions & 12 deletions src/stcal/ramp_fitting/ols_fit.py
Original file line number Diff line number Diff line change
Expand Up @@ -545,12 +545,11 @@ def slice_ramp_data(ramp_data, start_row, nrows):

# Slice data by row
data = ramp_data.data[:, :, start_row : start_row + nrows, :].copy()
err = ramp_data.err[:, :, start_row : start_row + nrows, :].copy()
groupdq = ramp_data.groupdq[:, :, start_row : start_row + nrows, :].copy()
pixeldq = ramp_data.pixeldq[start_row : start_row + nrows, :].copy()
average_dark_current = ramp_data.average_dark_current[start_row : start_row + nrows, :].copy()

ramp_data_slice.set_arrays(data, err, groupdq, pixeldq, average_dark_current)
ramp_data_slice.set_arrays(data, groupdq, pixeldq, average_dark_current)

if ramp_data.zeroframe is not None:
ramp_data_slice.zeroframe = ramp_data.zeroframe[:, start_row : start_row + nrows, :].copy()
Expand Down Expand Up @@ -785,7 +784,6 @@ def endianness_handler(ramp_data, gain_2d, readnoise_2d):
readnoise_2d, rn_bswap = handle_array_endianness(readnoise_2d, sys_order)

ramp_data.data, _ = handle_array_endianness(ramp_data.data, sys_order)
ramp_data.err, _ = handle_array_endianness(ramp_data.err, sys_order)
ramp_data.average_dark_current , _ = handle_array_endianness(ramp_data.average_dark_current, sys_order)
ramp_data.groupdq, _ = handle_array_endianness(ramp_data.groupdq, sys_order)
ramp_data.pixeldq, _ = handle_array_endianness(ramp_data.pixeldq, sys_order)
Expand Down Expand Up @@ -927,7 +925,6 @@ def discard_miri_groups(ramp_data):
True if usable data available for further processing.
"""
data = ramp_data.data
err = ramp_data.err
groupdq = ramp_data.groupdq
orig_gdq = ramp_data.orig_gdq

Expand Down Expand Up @@ -958,7 +955,6 @@ def discard_miri_groups(ramp_data):

if num_bad_slices > 0:
data = data[:, num_bad_slices:, :, :]
err = err[:, num_bad_slices:, :, :]
if orig_gdq is not None:
orig_gdq = orig_gdq[:, num_bad_slices:, :, :]

Expand All @@ -978,7 +974,6 @@ def discard_miri_groups(ramp_data):
return False

data = data[:, :-1, :, :]
err = err[:, :-1, :, :]
groupdq = groupdq[:, :-1, :, :]
if orig_gdq is not None:
orig_gdq = orig_gdq[:, :-1, :, :]
Expand All @@ -993,7 +988,6 @@ def discard_miri_groups(ramp_data):
return False

ramp_data.data = data
ramp_data.err = err
ramp_data.groupdq = groupdq
if orig_gdq is not None:
ramp_data.orig_gdq = orig_gdq
Expand Down Expand Up @@ -1061,7 +1055,6 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
"""
# Get image data information
data = ramp_data.data
err = ramp_data.err
groupdq = ramp_data.groupdq
inpixeldq = ramp_data.pixeldq

Expand All @@ -1073,7 +1066,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
imshape = (nrows, ncols)
cubeshape = (ngroups, *imshape)

# Get GROUP DQ and ERR arrays from input file
# Get GROUP DQ array from input file
gdq_cube = groupdq
gdq_cube_shape = gdq_cube.shape

Expand Down Expand Up @@ -1198,7 +1191,6 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
del pixeldq_sect

ramp_data.data = data
ramp_data.err = err
ramp_data.groupdq = groupdq
ramp_data.pixeldq = inpixeldq

Expand Down Expand Up @@ -1282,7 +1274,6 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans)
"""
# Get image data information
data = ramp_data.data
err = ramp_data.err
groupdq = ramp_data.groupdq
inpixeldq = ramp_data.pixeldq

Expand Down Expand Up @@ -1440,7 +1431,6 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans)
del segs_4

ramp_data.data = data
ramp_data.err = err
ramp_data.groupdq = groupdq
ramp_data.pixeldq = inpixeldq

Expand Down
Loading

0 comments on commit 3c24bdd

Please sign in to comment.