Skip to content

Commit

Permalink
Fix status update with faulty PlacementBinding
Browse files Browse the repository at this point in the history
With a PlacementBinding containing an invalid
placementRef, the propagator was returning an
error and endlessly re-reconciling. This occurred
even if the PlacementBinding was not bound to the
Policy.

ref: https://issues.redhat.com/browse/ACM-9930

Signed-off-by: Dale Haiducek <[email protected]>
(cherry picked from commit 41176f2)
Signed-off-by: Dale Haiducek <[email protected]>
  • Loading branch information
dhaiducek committed Feb 15, 2024
1 parent cede849 commit 2483eaf
Show file tree
Hide file tree
Showing 4 changed files with 47 additions and 13 deletions.
5 changes: 4 additions & 1 deletion controllers/common/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,8 +179,11 @@ func HasValidPlacementRef(pb *policiesv1.PlacementBinding) bool {
func GetDecisions(
ctx context.Context, c client.Client, pb *policiesv1.PlacementBinding,
) ([]appsv1.PlacementDecision, error) {
// If the PlacementRef is invalid, log and return. (This is not recoverable.)
if !HasValidPlacementRef(pb) {
return nil, fmt.Errorf("placement binding %s/%s reference is not valid", pb.Namespace, pb.Name)
log.Info(fmt.Sprintf("PlacementBinding %s/%s placementRef is not valid. Ignoring.", pb.Namespace, pb.Name))

return nil, nil
}

refNN := types.NamespacedName{
Expand Down
11 changes: 7 additions & 4 deletions controllers/common/common_status_update.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,10 +69,6 @@ func RootStatusUpdate(ctx context.Context, c client.Client, rootPolicy *policies
func GetPolicyPlacementDecisions(ctx context.Context, c client.Client,
instance *policiesv1.Policy, pb *policiesv1.PlacementBinding,
) (decisions []appsv1.PlacementDecision, placements []*policiesv1.Placement, err error) {
if !HasValidPlacementRef(pb) {
return nil, nil, fmt.Errorf("placement binding %s/%s reference is not valid", pb.Namespace, pb.Name)
}

policySubjectFound := false
policySetSubjects := make(map[string]struct{}) // a set, to prevent duplicates

Expand Down Expand Up @@ -109,6 +105,13 @@ func GetPolicyPlacementDecisions(ctx context.Context, c client.Client,
return nil, nil, nil
}

// If the PlacementRef is invalid, log and return. (This is not recoverable.)
if !HasValidPlacementRef(pb) {
log.Info(fmt.Sprintf("Placement binding %s/%s placementRef is not valid. Ignoring.", pb.Namespace, pb.Name))

return nil, nil, nil
}

// If the placementRef exists, then it needs to be added to the placement item
refNN := types.NamespacedName{
Namespace: pb.GetNamespace(),
Expand Down
32 changes: 24 additions & 8 deletions test/e2e/case2_aggregation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,32 @@ var _ = Describe("Test policy status aggregation", func() {
const (
case2PolicyName string = "case2-test-policy"
case2PolicyYaml string = "../resources/case2_aggregation/case2-test-policy.yaml"
faultyPBName string = "case2-faulty-placementbinding"
faultyPBYaml string = "../resources/case2_aggregation/faulty-placementbinding.yaml"
)

Describe("Root status from different placements", func() {
Describe("Root status from different placements", Ordered, func() {
AfterAll(func() {
utils.Kubectl("delete",
"-f", faultyPBYaml,
"-n", testNamespace)
utils.Kubectl("delete",
"-f", case2PolicyYaml,
"-n", testNamespace)
opt := metav1.ListOptions{}
utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 0, false, 10)
})

It("should create the faulty PlacementBinding in user ns", func() {
By("Creating " + faultyPBName)
utils.Kubectl("apply",
"-f", faultyPBYaml,
"-n", testNamespace)
pb := utils.GetWithTimeout(
clientHubDynamic, gvrPlacementBinding, faultyPBName, testNamespace, true, defaultTimeoutSeconds,
)
Expect(pb).NotTo(BeNil())
})
It("should be created in user ns", func() {
By("Creating " + case2PolicyYaml)
utils.Kubectl("apply",
Expand Down Expand Up @@ -220,13 +243,6 @@ var _ = Describe("Test policy status aggregation", func() {
return rootPlc.Object["status"]
}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(emptyStatus))
})
It("should clean up", func() {
utils.Kubectl("delete",
"-f", case2PolicyYaml,
"-n", testNamespace)
opt := metav1.ListOptions{}
utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 0, false, 10)
})
})
Describe("Root compliance from managed statuses", Ordered, func() {
// To get around `testNamespace` not being initialized during Ginkgo's Tree Construction phase
Expand Down
12 changes: 12 additions & 0 deletions test/resources/case2_aggregation/faulty-placementbinding.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Intentionally faulty PlacementBinding fixture for the case2 aggregation
# e2e test: it exercises the path where an invalid placementRef is ignored
# (logged) instead of causing an error and endless re-reconciliation.
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
  name: case2-faulty-placementbinding
# NOTE(review): this placementRef is the "faulty" part — presumably the
# apiGroup/kind combination (or the nonexistent referenced placement) fails
# HasValidPlacementRef; confirm against the validation in controllers/common.
placementRef:
  apiGroup: cluster.open-cluster-management.io
  kind: PlacementRule
  name: case2-test-policy-plr
subjects:
# Subject names a policy ("rando-policy") that is not case2-test-policy, so
# this binding is not bound to the policy under test — matching the commit's
# scenario of a faulty binding unrelated to the Policy.
- apiGroup: policy.open-cluster-management.io
  kind: Policy
  name: rando-policy

0 comments on commit 2483eaf

Please sign in to comment.