Skip to content

Commit

Permalink
Fix AttributeError in 'PerceptualLoss' (#7693)
Browse files Browse the repository at this point in the history
Fixes #7692

### Description

Fix AttributeError in 'PerceptualLoss' 

### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not
applicable items -->
- [x] Non-breaking change (fix or new feature that would not break
existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing
functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u
--net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick
--unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/`
folder.

---------

Signed-off-by: YunLiu <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
KumoLiu and pre-commit-ci[bot] authored Apr 22, 2024
1 parent c6bf8e9 commit 178ebc8
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 18 deletions.
1 change: 1 addition & 0 deletions monai/losses/perceptual.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ def __init__(
self.perceptual_function = LPIPS(pretrained=pretrained, net=network_type, verbose=False)
self.is_fake_3d = is_fake_3d
self.fake_3d_ratio = fake_3d_ratio
self.channel_wise = channel_wise

def _calculate_axis_loss(self, input: torch.Tensor, target: torch.Tensor, spatial_axis: int) -> torch.Tensor:
"""
Expand Down
16 changes: 8 additions & 8 deletions tests/test_clip_intensity_percentiles.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def test_hard_clipping_two_sided(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (5, 95))
expected = clip(convert_to_tensor(im), lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
Expand All @@ -40,7 +40,7 @@ def test_hard_clipping_one_sided_high(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (0, 95))
expected = clip(im, lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
Expand All @@ -49,7 +49,7 @@ def test_hard_clipping_one_sided_low(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (5, 100))
expected = clip(im, lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
Expand Down Expand Up @@ -89,7 +89,7 @@ def test_channel_wise(self, p):
for i, c in enumerate(im):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)

def test_ill_sharpness_factor(self):
with self.assertRaises(ValueError):
Expand Down Expand Up @@ -121,7 +121,7 @@ def test_hard_clipping_two_sided(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (5, 95))
expected = clip(im, lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
Expand All @@ -130,7 +130,7 @@ def test_hard_clipping_one_sided_high(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (0, 95))
expected = clip(im, lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
Expand All @@ -139,7 +139,7 @@ def test_hard_clipping_one_sided_low(self, p):
result = hard_clipper(im)
lower, upper = percentile(im, (5, 100))
expected = clip(im, lower, upper)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
Expand Down Expand Up @@ -179,7 +179,7 @@ def test_channel_wise(self, p):
for i, c in enumerate(im):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)


if __name__ == "__main__":
Expand Down
19 changes: 9 additions & 10 deletions tests/test_clip_intensity_percentilesd.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
from monai.transforms import ClipIntensityPercentilesd
from monai.transforms.utils import soft_clip
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
from monai.utils.type_conversion import convert_to_tensor
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose


Expand All @@ -32,8 +31,8 @@ def test_hard_clipping_two_sided(self, p):
im = p(self.imt)
result = hard_clipper({key: im})
lower, upper = percentile(im, (5, 95))
expected = clip(convert_to_tensor(im), lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
Expand All @@ -43,7 +42,7 @@ def test_hard_clipping_one_sided_high(self, p):
result = hard_clipper({key: im})
lower, upper = percentile(im, (0, 95))
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
Expand All @@ -53,7 +52,7 @@ def test_hard_clipping_one_sided_low(self, p):
result = hard_clipper({key: im})
lower, upper = percentile(im, (5, 100))
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
Expand Down Expand Up @@ -97,7 +96,7 @@ def test_channel_wise(self, p):
for i, c in enumerate(im):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-4, atol=0)

def test_ill_sharpness_factor(self):
key = "img"
Expand Down Expand Up @@ -135,7 +134,7 @@ def test_hard_clipping_two_sided(self, p):
result = hard_clipper({key: im})
lower, upper = percentile(im, (5, 95))
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
Expand All @@ -145,7 +144,7 @@ def test_hard_clipping_one_sided_high(self, p):
result = hard_clipper({key: im})
lower, upper = percentile(im, (0, 95))
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
Expand All @@ -155,7 +154,7 @@ def test_hard_clipping_one_sided_low(self, p):
result = hard_clipper({key: im})
lower, upper = percentile(im, (5, 100))
expected = clip(im, lower, upper)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
Expand Down Expand Up @@ -199,7 +198,7 @@ def test_channel_wise(self, p):
for i, c in enumerate(im):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-4, atol=0)


if __name__ == "__main__":
Expand Down

0 comments on commit 178ebc8

Please sign in to comment.