From f380a3fe14be8b4e8a216a9ee733ae88a7b9cd72 Mon Sep 17 00:00:00 2001 From: Nikhil-Ladha Date: Thu, 11 Jul 2024 15:19:38 +0530 Subject: [PATCH 1/3] groupreplication: add controller logic for volume group replication added controller logic for volume group replication Signed-off-by: Nikhil-Ladha --- .../v1alpha1/volumegroupreplication_types.go | 9 +- .../volumegroupreplicationcontent_types.go | 21 +- cmd/manager/main.go | 6 +- .../replication.storage/finalizers.go | 87 ++- .../replication.storage/parameters.go | 10 +- .../controller/replication.storage/pvc.go | 39 +- .../replication.storage/pvc_test.go | 16 +- .../controller/replication.storage/utils.go | 73 ++ .../volumegroupreplication_controller.go | 631 +++++++++++++++++- .../volumegroupreplication_test.go | 199 ++++++ .../volumegroupreplicationclass.go | 44 ++ .../volumegroupreplicationclass_test.go | 78 +++ ...olumegroupreplicationcontent_controller.go | 270 +++++++- .../volumereplication_controller.go | 110 +-- 14 files changed, 1442 insertions(+), 151 deletions(-) create mode 100644 internal/controller/replication.storage/utils.go create mode 100644 internal/controller/replication.storage/volumegroupreplication_test.go create mode 100644 internal/controller/replication.storage/volumegroupreplicationclass.go create mode 100644 internal/controller/replication.storage/volumegroupreplicationclass_test.go diff --git a/api/replication.storage/v1alpha1/volumegroupreplication_types.go b/api/replication.storage/v1alpha1/volumegroupreplication_types.go index 8ad2bff70..6a2b1d60e 100644 --- a/api/replication.storage/v1alpha1/volumegroupreplication_types.go +++ b/api/replication.storage/v1alpha1/volumegroupreplication_types.go @@ -21,6 +21,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + VolumeGroupReplicationNameAnnotation = "replication.storage.openshift.io/volume-group-replication-name" +) + // VolumeGroupReplicationSpec defines the desired state of VolumeGroupReplication type 
VolumeGroupReplicationSpec struct { // volumeGroupReplicationClassName is the volumeGroupReplicationClass name for this VolumeGroupReplication resource @@ -28,9 +32,10 @@ type VolumeGroupReplicationSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupReplicationClassName is immutable" VolumeGroupReplicationClassName string `json:"volumeGroupReplicationClassName"` - // volumeReplicationClassName is the volumeReplicationClass name for VolumeReplication object + // volumeReplicationClassName is the volumeReplicationClass name for the VolumeReplication object + // created for this volumeGroupReplication // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumReplicationClassName is immutable" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeReplicationClassName is immutable" VolumeReplicationClassName string `json:"volumeReplicationClassName"` // Name of the VolumeReplication object created for this volumeGroupReplication diff --git a/api/replication.storage/v1alpha1/volumegroupreplicationcontent_types.go b/api/replication.storage/v1alpha1/volumegroupreplicationcontent_types.go index fcbb7a95a..9be2833bc 100644 --- a/api/replication.storage/v1alpha1/volumegroupreplicationcontent_types.go +++ b/api/replication.storage/v1alpha1/volumegroupreplicationcontent_types.go @@ -21,19 +21,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + VolumeGroupReplicationContentNameAnnotation = "replication.storage.openshift.io/volumegroupreplication-content-name" +) + // VolumeGroupReplicationContentSpec defines the desired state of VolumeGroupReplicationContent type VolumeGroupReplicationContentSpec struct { // VolumeGroupreplicationRef specifies the VolumeGroupReplication object to which this // VolumeGroupReplicationContent object is bound. 
// VolumeGroupReplication.Spec.VolumeGroupReplicationContentName field must reference to // this VolumeGroupReplicationContent's name for the bidirectional binding to be valid. - // For a pre-existing VolumeGroupReplicationContent object, name and namespace of the - // VolumeGroupReplication object MUST be provided for binding to happen. - // This field is immutable after creation. - // Required. - // +kubebuilder:validation:XValidation:rule="has(self.name) && has(self.__namespace__)",message="both volumeGroupReplicationRef.name and volumeGroupReplicationRef.namespace must be set" - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupReplicationRef is immutable" - VolumeGroupReplicationRef corev1.ObjectReference `json:"volumeGroupReplicationRef"` + // +kubebuilder:validation:XValidation:rule="self != null ? has(self.name) && has(self.__namespace__) && has(self.uid) : true",message="volumeGroupReplicationRef.name, volumeGroupReplicationRef.namespace and volumeGroupReplicationRef.uid must be set if volumeGroupReplicationRef is defined" + // +optional + VolumeGroupReplicationRef *corev1.ObjectReference `json:"volumeGroupReplicationRef,omitempty"` // VolumeGroupReplicationHandle is a unique id returned by the CSI driver // to identify the VolumeGroupReplication on the storage system. @@ -49,12 +49,11 @@ type VolumeGroupReplicationContentSpec struct { // VolumeGroupReplicationClassName is the name of the VolumeGroupReplicationClass from // which this group replication was (or will be) created. - // +optional + // Required. VolumeGroupReplicationClassName string `json:"volumeGroupReplicationClassName"` - // Source specifies whether the snapshot is (or should be) dynamically provisioned + // Source specifies whether the volume is (or should be) dynamically provisioned // or already exists, and just requires a Kubernetes object representation. - // This field is immutable after creation. // Required. 
Source VolumeGroupReplicationContentSource `json:"source"` } @@ -68,7 +67,7 @@ type VolumeGroupReplicationContentSource struct { // VolumeGroupReplicationContentStatus defines the status of VolumeGroupReplicationContent type VolumeGroupReplicationContentStatus struct { - // PersistentVolumeRefList is the list of of PV for the group replication + // PersistentVolumeRefList is the list of PV for the group replication // The maximum number of allowed PV in the group is 100. // +optional PersistentVolumeRefList []corev1.LocalObjectReference `json:"persistentVolumeRefList,omitempty"` diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 743bbf3c3..d070f5799 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -251,8 +251,10 @@ func main() { os.Exit(1) } if err = (&replicationController.VolumeGroupReplicationContentReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Connpool: connPool, + Timeout: defaultTimeout, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VolumeGroupReplicationContent") os.Exit(1) diff --git a/internal/controller/replication.storage/finalizers.go b/internal/controller/replication.storage/finalizers.go index ce880595f..109d5273e 100644 --- a/internal/controller/replication.storage/finalizers.go +++ b/internal/controller/replication.storage/finalizers.go @@ -26,6 +26,7 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -65,43 +66,44 @@ func (r *VolumeReplicationReconciler) removeFinalizerFromVR(logger logr.Logger, return nil } -// addFinalizerToPVC adds the VR finalizer on the PersistentVolumeClaim. 
-func (r *VolumeReplicationReconciler) addFinalizerToPVC(logger logr.Logger, pvc *corev1.PersistentVolumeClaim) error { - if !slices.Contains(pvc.ObjectMeta.Finalizers, pvcReplicationFinalizer) { - logger.Info("adding finalizer to PersistentVolumeClaim object", "Finalizer", pvcReplicationFinalizer) - pvc.ObjectMeta.Finalizers = append(pvc.ObjectMeta.Finalizers, pvcReplicationFinalizer) - if err := r.Client.Update(context.TODO(), pvc); err != nil { +// AddFinalizerToPVC adds the VR, VGR finalizer on the PersistentVolumeClaim. +func AddFinalizerToPVC(client client.Client, logger logr.Logger, pvc *corev1.PersistentVolumeClaim, + replicationFinalizer string) error { + if !slices.Contains(pvc.ObjectMeta.Finalizers, replicationFinalizer) { + logger.Info("adding finalizer to PersistentVolumeClaim object", "Finalizer", replicationFinalizer) + pvc.ObjectMeta.Finalizers = append(pvc.ObjectMeta.Finalizers, replicationFinalizer) + if err := client.Update(context.TODO(), pvc); err != nil { return fmt.Errorf("failed to add finalizer (%s) to PersistentVolumeClaim resource"+ " (%s/%s) %w", - pvcReplicationFinalizer, pvc.Namespace, pvc.Name, err) + replicationFinalizer, pvc.Namespace, pvc.Name, err) } } return nil } -// removeFinalizerFromPVC removes the VR finalizer on PersistentVolumeClaim. -func (r *VolumeReplicationReconciler) removeFinalizerFromPVC(logger logr.Logger, pvc *corev1.PersistentVolumeClaim, -) error { - if slices.Contains(pvc.ObjectMeta.Finalizers, pvcReplicationFinalizer) { - logger.Info("removing finalizer from PersistentVolumeClaim object", "Finalizer", pvcReplicationFinalizer) - pvc.ObjectMeta.Finalizers = util.RemoveFromSlice(pvc.ObjectMeta.Finalizers, pvcReplicationFinalizer) - if err := r.Client.Update(context.TODO(), pvc); err != nil { +// RemoveFinalizerFromPVC removes the VR, VGR finalizer on PersistentVolumeClaim. 
+func RemoveFinalizerFromPVC(client client.Client, logger logr.Logger, pvc *corev1.PersistentVolumeClaim, + replicationFinalizer string) error { + if slices.Contains(pvc.ObjectMeta.Finalizers, replicationFinalizer) { + logger.Info("removing finalizer from PersistentVolumeClaim object", "Finalizer", replicationFinalizer) + pvc.ObjectMeta.Finalizers = util.RemoveFromSlice(pvc.ObjectMeta.Finalizers, replicationFinalizer) + if err := client.Update(context.TODO(), pvc); err != nil { return fmt.Errorf("failed to remove finalizer (%s) from PersistentVolumeClaim resource"+ " (%s/%s), %w", - pvcReplicationFinalizer, pvc.Namespace, pvc.Name, err) + replicationFinalizer, pvc.Namespace, pvc.Name, err) } } return nil } -// addFinalizerToVGR adds the VR finalizer on the VolumeGroupReplication. -func (r *VolumeReplicationReconciler) addFinalizerToVGR(logger logr.Logger, vgr *replicationv1alpha1.VolumeGroupReplication) error { +// AddFinalizerToVGR adds the VGR finalizer on the VolumeGroupReplication resource +func AddFinalizerToVGR(client client.Client, logger logr.Logger, vgr *replicationv1alpha1.VolumeGroupReplication) error { if !slices.Contains(vgr.ObjectMeta.Finalizers, vgrReplicationFinalizer) { - logger.Info("adding finalizer to VolumeGroupReplication object", "Finalizer", vgrReplicationFinalizer) + logger.Info("adding finalizer to volumeGroupReplication object", "Finalizer", vgrReplicationFinalizer) vgr.ObjectMeta.Finalizers = append(vgr.ObjectMeta.Finalizers, vgrReplicationFinalizer) - if err := r.Client.Update(context.TODO(), vgr); err != nil { + if err := client.Update(context.TODO(), vgr); err != nil { return fmt.Errorf("failed to add finalizer (%s) to VolumeGroupReplication resource"+ " (%s/%s) %w", vgrReplicationFinalizer, vgr.Namespace, vgr.Name, err) @@ -111,13 +113,18 @@ func (r *VolumeReplicationReconciler) addFinalizerToVGR(logger logr.Logger, vgr return nil } -// removeFinalizerFromVGR removes the VR finalizer on VolumeGroupReplication. 
-func (r *VolumeReplicationReconciler) removeFinalizerFromVGR(logger logr.Logger, vgr *replicationv1alpha1.VolumeGroupReplication, -) error { +// RemoveFinalizerFromVGR removes the VGR finalizer from the VolumeGroupReplication instance. +func RemoveFinalizerFromVGR(client client.Client, logger logr.Logger, vgr *replicationv1alpha1.VolumeGroupReplication) error { if slices.Contains(vgr.ObjectMeta.Finalizers, vgrReplicationFinalizer) { - logger.Info("removing finalizer from VolumeGroupReplication object", "Finalizer", vgrReplicationFinalizer) + logger.Info("removing finalizer from volumeGroupReplication object", "Finalizer", vgrReplicationFinalizer) + // Check if owner annotations are removed from the VGR resource + if vgr.Annotations[replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation] != "" || + vgr.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation] != "" { + return fmt.Errorf("failed to remove finalizer from volumeGroupReplication object"+ + ",dependent resources are not yet deleted (%s/%s)", vgr.Namespace, vgr.Name) + } vgr.ObjectMeta.Finalizers = util.RemoveFromSlice(vgr.ObjectMeta.Finalizers, vgrReplicationFinalizer) - if err := r.Client.Update(context.TODO(), vgr); err != nil { + if err := client.Update(context.TODO(), vgr); err != nil { return fmt.Errorf("failed to remove finalizer (%s) from VolumeGroupReplication resource"+ " (%s/%s), %w", vgrReplicationFinalizer, vgr.Namespace, vgr.Name, err) @@ -126,3 +133,35 @@ func (r *VolumeReplicationReconciler) removeFinalizerFromVGR(logger logr.Logger, return nil } + +// AddFinalizerToVGRContent adds the VR, VGR finalizer on the VolumeGroupReplicationContent resource +func AddFinalizerToVGRContent(client client.Client, logger logr.Logger, vgrContent *replicationv1alpha1.VolumeGroupReplicationContent, + replicationFinalizer string) error { + if !slices.Contains(vgrContent.ObjectMeta.Finalizers, replicationFinalizer) { + logger.Info("adding finalizer to volumeGroupReplicationContent object", 
"Finalizer", replicationFinalizer) + vgrContent.ObjectMeta.Finalizers = append(vgrContent.ObjectMeta.Finalizers, replicationFinalizer) + if err := client.Update(context.TODO(), vgrContent); err != nil { + return fmt.Errorf("failed to add finalizer (%s) to VolumeGroupReplicationContent resource"+ + " (%s/%s) %w", + replicationFinalizer, vgrContent.Namespace, vgrContent.Name, err) + } + } + + return nil +} + +// RemoveFinalizerFromVGRContent removes the VR, VGR finalizer from the VolumeGroupReplicationContent instance. +func RemoveFinalizerFromVGRContent(client client.Client, logger logr.Logger, vgrContent *replicationv1alpha1.VolumeGroupReplicationContent, + replicationFinalizer string) error { + if slices.Contains(vgrContent.ObjectMeta.Finalizers, replicationFinalizer) { + logger.Info("removing finalizer from volumeGroupReplicationContent object", "Finalizer", replicationFinalizer) + vgrContent.ObjectMeta.Finalizers = util.RemoveFromSlice(vgrContent.ObjectMeta.Finalizers, replicationFinalizer) + if err := client.Update(context.TODO(), vgrContent); err != nil { + return fmt.Errorf("failed to remove finalizer (%s) from VolumeGroupReplicationContent resource"+ + " (%s/%s), %w", + replicationFinalizer, vgrContent.Namespace, vgrContent.Name, err) + } + } + + return nil +} diff --git a/internal/controller/replication.storage/parameters.go b/internal/controller/replication.storage/parameters.go index 1f3f58404..08182c73f 100644 --- a/internal/controller/replication.storage/parameters.go +++ b/internal/controller/replication.storage/parameters.go @@ -29,8 +29,10 @@ const ( // Driver. 
replicationParameterPrefix = "replication.storage.openshift.io/" - prefixedReplicationSecretNameKey = replicationParameterPrefix + "replication-secret-name" // name key for secret - prefixedReplicationSecretNamespaceKey = replicationParameterPrefix + "replication-secret-namespace" // namespace key secret + prefixedReplicationSecretNameKey = replicationParameterPrefix + "replication-secret-name" // name key for secret + prefixedReplicationSecretNamespaceKey = replicationParameterPrefix + "replication-secret-namespace" // namespace key secret + prefixedGroupReplicationSecretNameKey = replicationParameterPrefix + "group-replication-secret-name" // name key for secret + prefixedGroupReplicationSecretNamespaceKey = replicationParameterPrefix + "group-replication-secret-namespace" // namespace key secret ) // filterPrefixedParameters removes all the reserved keys from the @@ -53,10 +55,14 @@ func validatePrefixedParameters(param map[string]string) error { if strings.HasPrefix(k, replicationParameterPrefix) { switch k { case prefixedReplicationSecretNameKey: + fallthrough + case prefixedGroupReplicationSecretNameKey: if v == "" { return errors.New("secret name cannot be empty") } case prefixedReplicationSecretNamespaceKey: + fallthrough + case prefixedGroupReplicationSecretNamespaceKey: if v == "" { return errors.New("secret namespace cannot be empty") } diff --git a/internal/controller/replication.storage/pvc.go b/internal/controller/replication.storage/pvc.go index 4f35321c2..4d0405bf9 100644 --- a/internal/controller/replication.storage/pvc.go +++ b/internal/controller/replication.storage/pvc.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" ) @@ -59,22 +60,27 @@ func (r VolumeReplicationReconciler) getPVCDataSource(logger logr.Logger, req ty return 
pvc, pv, nil } -// annotatePVCWithOwner will add the VolumeReplication details to the PVC annotations. -func (r *VolumeReplicationReconciler) annotatePVCWithOwner(ctx context.Context, logger logr.Logger, reqOwnerName string, pvc *corev1.PersistentVolumeClaim) error { +// AnnotatePVCWithOwner will add the VolumeReplication/VolumeGroupReplication details to the PVC annotations. +func AnnotatePVCWithOwner(client client.Client, logger logr.Logger, reqOwnerName string, + pvc *corev1.PersistentVolumeClaim, pvcAnnotation string) error { if pvc.ObjectMeta.Annotations == nil { pvc.ObjectMeta.Annotations = map[string]string{} } - currentOwnerName := pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation] + if pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation] != "" && + pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeGroupReplicationNameAnnotation] != "" { + logger.Info("PVC can't be part of both VolumeGroupReplication and VolumeReplication") + return fmt.Errorf("PVC %q can't be owned by both VolumeReplication and VolumeGroupReplication", pvc.Name) + } + + currentOwnerName := pvc.ObjectMeta.Annotations[pvcAnnotation] if currentOwnerName == "" { logger.Info("setting owner on PVC annotation", "Name", pvc.Name, "owner", reqOwnerName) - pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation] = reqOwnerName - err := r.Update(ctx, pvc) + pvc.ObjectMeta.Annotations[pvcAnnotation] = reqOwnerName + err := client.Update(context.TODO(), pvc) if err != nil { logger.Error(err, "Failed to update PVC annotation", "Name", pvc.Name) - - return fmt.Errorf("failed to update PVC %q annotation for VolumeReplication: %w", - pvc.Name, err) + return fmt.Errorf("failed to update PVC %q annotation for replication: %w", pvc.Name, err) } return nil @@ -86,22 +92,23 @@ func (r *VolumeReplicationReconciler) annotatePVCWithOwner(ctx context.Context, "current owner", currentOwnerName, "requested owner", reqOwnerName) - return 
fmt.Errorf("PVC %q not owned by VolumeReplication %q", + return fmt.Errorf("PVC %q not owned by correct VolumeReplication/VolumeGroupReplication %q", pvc.Name, reqOwnerName) } return nil } -// removeOwnerFromPVCAnnotation removes the VolumeReplication owner from the PVC annotations. -func (r *VolumeReplicationReconciler) removeOwnerFromPVCAnnotation(ctx context.Context, logger logr.Logger, pvc *corev1.PersistentVolumeClaim) error { - if _, ok := pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation]; ok { - logger.Info("removing owner annotation from PersistentVolumeClaim object", "Annotation", replicationv1alpha1.VolumeReplicationNameAnnotation) - delete(pvc.ObjectMeta.Annotations, replicationv1alpha1.VolumeReplicationNameAnnotation) - if err := r.Client.Update(ctx, pvc); err != nil { +// RemoveOwnerFromPVCAnnotation removes the VolumeReplication/VolumeGroupReplication owner from the PVC annotations. +func RemoveOwnerFromPVCAnnotation(client client.Client, logger logr.Logger, pvc *corev1.PersistentVolumeClaim, + pvcAnnotation string) error { + if _, ok := pvc.ObjectMeta.Annotations[pvcAnnotation]; ok { + logger.Info("removing owner annotation from PersistentVolumeClaim object", "Annotation", pvcAnnotation) + delete(pvc.ObjectMeta.Annotations, pvcAnnotation) + if err := client.Update(context.TODO(), pvc); err != nil { return fmt.Errorf("failed to remove annotation %q from PersistentVolumeClaim "+ "%q %w", - replicationv1alpha1.VolumeReplicationNameAnnotation, pvc.Name, err) + pvcAnnotation, pvc.Name, err) } } diff --git a/internal/controller/replication.storage/pvc_test.go b/internal/controller/replication.storage/pvc_test.go index 1e2926fcf..ecac23aed 100644 --- a/internal/controller/replication.storage/pvc_test.go +++ b/internal/controller/replication.storage/pvc_test.go @@ -18,6 +18,7 @@ package controller import ( "context" + "fmt" "testing" replicationv1alpha1 
"github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" @@ -178,6 +179,7 @@ func TestGetVolumeHandle(t *testing.T) { func TestVolumeReplicationReconciler_annotatePVCWithOwner(t *testing.T) { t.Parallel() vrName := "test-vr" + vrNamespace := "test-ns" testcases := []struct { name string @@ -196,7 +198,7 @@ func TestVolumeReplicationReconciler_annotatePVCWithOwner(t *testing.T) { Name: "pvc-name", Namespace: mockNamespace, Annotations: map[string]string{ - replicationv1alpha1.VolumeReplicationNameAnnotation: vrName, + replicationv1alpha1.VolumeReplicationNameAnnotation: fmt.Sprintf("%s/%s", vrName, vrNamespace), }, }, }, @@ -220,13 +222,15 @@ func TestVolumeReplicationReconciler_annotatePVCWithOwner(t *testing.T) { for _, tc := range testcases { volumeReplication := &replicationv1alpha1.VolumeReplication{} mockVolumeReplicationObj.DeepCopyInto(volumeReplication) + volumeReplication.Name = vrName testPVC := &corev1.PersistentVolumeClaim{} tc.pvc.DeepCopyInto(testPVC) ctx := context.TODO() reconciler := createFakeVolumeReplicationReconciler(t, testPVC, volumeReplication) - err := reconciler.annotatePVCWithOwner(ctx, log.FromContext(context.TODO()), vrName, testPVC) + reqOwner := fmt.Sprintf("%s/%s", volumeReplication.Name, volumeReplication.Namespace) + err := AnnotatePVCWithOwner(reconciler.Client, log.FromContext(context.TODO()), reqOwner, testPVC, replicationv1alpha1.VolumeReplicationNameAnnotation) if tc.errorExpected { assert.Error(t, err) } else { @@ -241,14 +245,14 @@ func TestVolumeReplicationReconciler_annotatePVCWithOwner(t *testing.T) { err = reconciler.Get(ctx, pvcNamespacedName, testPVC) assert.NoError(t, err) - assert.Equal(t, testPVC.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation], vrName) + assert.Equal(t, testPVC.ObjectMeta.Annotations[replicationv1alpha1.VolumeReplicationNameAnnotation], reqOwner) } - err = reconciler.removeOwnerFromPVCAnnotation(context.TODO(), log.FromContext(context.TODO()), 
testPVC) + err = RemoveOwnerFromPVCAnnotation(reconciler.Client, log.FromContext(context.TODO()), testPVC, replicationv1alpha1.VolumeReplicationNameAnnotation) assert.NoError(t, err) // try calling delete again, it should not fail - err = reconciler.removeOwnerFromPVCAnnotation(context.TODO(), log.FromContext(context.TODO()), testPVC) + err = RemoveOwnerFromPVCAnnotation(reconciler.Client, log.FromContext(context.TODO()), testPVC, replicationv1alpha1.VolumeReplicationNameAnnotation) assert.NoError(t, err) } @@ -262,6 +266,6 @@ func TestVolumeReplicationReconciler_annotatePVCWithOwner(t *testing.T) { } volumeReplication := &replicationv1alpha1.VolumeReplication{} reconciler := createFakeVolumeReplicationReconciler(t, pvc, volumeReplication) - err := reconciler.removeOwnerFromPVCAnnotation(context.TODO(), log.FromContext(context.TODO()), pvc) + err := RemoveOwnerFromPVCAnnotation(reconciler.Client, log.FromContext(context.TODO()), pvc, replicationv1alpha1.VolumeReplicationNameAnnotation) assert.NoError(t, err) } diff --git a/internal/controller/replication.storage/utils.go b/internal/controller/replication.storage/utils.go new file mode 100644 index 000000000..714189392 --- /dev/null +++ b/internal/controller/replication.storage/utils.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "time" + + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func GetReplicationState(instanceState replicationv1alpha1.ReplicationState) replicationv1alpha1.State { + switch instanceState { + case replicationv1alpha1.Primary: + return replicationv1alpha1.PrimaryState + case replicationv1alpha1.Secondary: + return replicationv1alpha1.SecondaryState + case replicationv1alpha1.Resync: + return replicationv1alpha1.SecondaryState + } + + return replicationv1alpha1.UnknownState +} + +func GetCurrentReplicationState(instanceStatusState replicationv1alpha1.State) replicationv1alpha1.State { + if instanceStatusState == "" { + return replicationv1alpha1.UnknownState + } + + return instanceStatusState +} + +func WaitForVolumeReplicationResource(client client.Client, logger logr.Logger, resourceName string) error { + unstructuredResource := &unstructured.UnstructuredList{} + unstructuredResource.SetGroupVersionKind(schema.GroupVersionKind{ + Group: replicationv1alpha1.GroupVersion.Group, + Kind: resourceName, + Version: replicationv1alpha1.GroupVersion.Version, + }) + for { + err := client.List(context.TODO(), unstructuredResource) + if err == nil { + return nil + } + // return errors other than NoMatch + if !meta.IsNoMatchError(err) { + logger.Error(err, "got an unexpected error while waiting for resource", "Resource", resourceName) + return err + } + logger.Info("resource does not exist", "Resource", resourceName) + time.Sleep(5 * time.Second) + } +} diff --git a/internal/controller/replication.storage/volumegroupreplication_controller.go b/internal/controller/replication.storage/volumegroupreplication_controller.go index 3426fef2a..130c81b92 100644 --- 
a/internal/controller/replication.storage/volumegroupreplication_controller.go +++ b/internal/controller/replication.storage/volumegroupreplication_controller.go @@ -18,13 +18,39 @@ package controller import ( "context" + "fmt" + "reflect" + "slices" + "strings" + "time" + "github.com/csi-addons/kubernetes-csi-addons/internal/controller/replication.storage/replication" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + "github.com/go-logr/logr" +) + +const ( + volumeGroupReplicationClass = "VolumeGroupReplicationClass" + volumeGroupReplication = "VolumeGroupReplication" + volumeGroupReplicationContent = "VolumeGroupReplicationContent" + volumeGroupReplicationRef = "replication.storage.openshift.io/volumegroupref" ) // VolumeGroupReplicationReconciler reconciles a VolumeGroupReplication object @@ -33,21 +59,618 @@ type VolumeGroupReplicationReconciler struct { Scheme *runtime.Scheme } -//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications,verbs=get;list;watch;update;patch 
//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications/status,verbs=get;update;patch //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications/finalizers,verbs=update +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationclasses,verbs=get;list;watch +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents,verbs=get;list;watch;create;update;delete +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplications,verbs=get;list;watch;create;update;delete +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplications/status,verbs=get;list +//+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims;persistentvolumes,verbs=get;list;watch;update +//+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/finalizers,verbs=update + +/* +Steps performed by the reconcile loop: +- Fetch and validate the VGRClass CR +- Fetch the matching PVCs based on the selector provided in the VGR CR, and check if they are already bounded to a CSI volume and the driver matches the driver provided in the VGRClass CR. +- Annotate to the PVCs with owner and add the VGR finalizer to them. +- Add the label selector to the VGR annotation, such that the PVC triggering a reconcile can fetch the VGR to reconcile +- Create the VGRContent with the PVs list fetched above, add VGR name/namespace as the annotation to it +- Wait for the volumes to be grouped, and the VGRContent to be updated with the group handle +- Then, create the VR CR and add VGR name/namespace as the annotation to it +- Update the VGR status with the VR status. + +In case of deletion: +- Remove the owner annotations and finalizers from the PVC +- Check if VR exists, then delete +- Check if VGRContent exists, then delete +- Remove VGR finalizer <- This won't happen until the dependent VR and VRContent is deleted. 
Validated using owner annotations set in both the dependent CRs. +*/ // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. func (r *VolumeGroupReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + logger := log.FromContext(ctx, "Request.Name", req.Name, "Request.Namespace", req.Namespace) + + // Fetch VolumeGroupReplication instance + instance := &replicationv1alpha1.VolumeGroupReplication{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + logger.Info("volumeGroupReplication resource not found") + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Get VolumeGroupReplicationClass instance + vgrClassObj, err := r.getVolumeGroupReplicationClass(logger, instance.Spec.VolumeGroupReplicationClassName) + if err != nil { + logger.Error(err, "failed to get volumeGroupReplicationClass resource", "VGRClassName", instance.Spec.VolumeGroupReplicationClassName) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + + // Validate that required parameters are present in the VGRClass resource + err = validatePrefixedParameters(vgrClassObj.Spec.Parameters) + if err != nil { + logger.Error(err, "failed to validate parameters of volumeGroupReplicationClass", "VGRClassName", instance.Spec.VolumeGroupReplicationClassName) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + + // Declare all dependent resources + vgrContentName := fmt.Sprintf("vgrcontent-%s", instance.UID) + if instance.Spec.VolumeGroupReplicationContentName != "" { + vgrContentName = instance.Spec.VolumeGroupReplicationContentName + } + vgrContentObj := &replicationv1alpha1.VolumeGroupReplicationContent{ + ObjectMeta: metav1.ObjectMeta{ + Name: vgrContentName, + }, + } + vrName := 
fmt.Sprintf("vr-%s", instance.UID) + if instance.Spec.VolumeReplicationName != "" { + vrName = instance.Spec.VolumeReplicationName + } + vrObj := &replicationv1alpha1.VolumeReplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: vrName, + Namespace: instance.Namespace, + }, + } + + // Create/Update dependent resources only if the instance is not marked for deletion + if instance.GetDeletionTimestamp().IsZero() { + + // Add finalizer to VGR instance + if err = AddFinalizerToVGR(r.Client, logger, instance); err != nil { + logger.Error(err, "failed to add VolumeGroupReplication finalizer") + return reconcile.Result{}, err + } + + // Check if PVCs exist based on provided selectors + pvcList, pvHandlesList, labelSelector, err := r.getMatchingPVCsFromSource(instance, logger, vgrClassObj) + if err != nil { + logger.Error(err, "failed to get PVCs using selector") + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + if len(pvcList.Items) == 0 { + err = fmt.Errorf("no matching PVCs found for the given selectors") + logger.Error(err, "provided selector should match at least 1 PVC") + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } else if len(pvcList.Items) > 100 { + err = fmt.Errorf("more than 100 PVCs match the given selector") + logger.Error(err, "only 100 PVCs are allowed for volume group replication") + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + + // Add the string representation of the labelSelector to the VGR annotation + if instance.ObjectMeta.Annotations == nil { + instance.ObjectMeta.Annotations = make(map[string]string) + } + + if instance.ObjectMeta.Annotations["pvcSelector"] != labelSelector { + instance.ObjectMeta.Annotations["pvcSelector"] = labelSelector + err = r.Client.Update(ctx, instance) + if err != nil { + logger.Error(err, "failed to add pvc selector annotation to VGR") + _ = r.setGroupReplicationFailure(instance, 
logger, err) + return reconcile.Result{}, err + } + } + + // Update PersistentVolumeClaimsRefList in VGR Status + tmpRefList := []corev1.LocalObjectReference{} + for _, pvc := range pvcList.Items { + tmpRefList = append(tmpRefList, corev1.LocalObjectReference{ + Name: pvc.Name, + }) + } + + // Annotate each PVC with owner and add finalizer to it + for _, pvc := range pvcList.Items { + reqOwner := fmt.Sprintf("%s/%s", instance.Name, instance.Namespace) + err = AnnotatePVCWithOwner(r.Client, logger, reqOwner, &pvc, replicationv1alpha1.VolumeGroupReplicationNameAnnotation) + if err != nil { + logger.Error(err, "Failed to add VGR owner annotation on PVC") + return ctrl.Result{}, err + } + + if err = AddFinalizerToPVC(r.Client, logger, &pvc, vgrReplicationFinalizer); err != nil { + logger.Error(err, "Failed to add VGR finalizer on PersistentVolumeClaim") + return reconcile.Result{}, err + } + } - return ctrl.Result{}, nil + // Update PersistentVolumeClaimsRefList in VGR Status + if !reflect.DeepEqual(instance.Status.PersistentVolumeClaimsRefList, tmpRefList) { + instance.Status.PersistentVolumeClaimsRefList = tmpRefList + err = r.Client.Status().Update(ctx, instance) + if err != nil { + logger.Error(err, "failed to update VolumeGroupReplication resource") + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + } + + // Create/Update VolumeGroupReplicationContent CR + err = r.createOrUpdateVolumeGroupReplicationContentCR(instance, vgrContentObj, vgrClassObj.Spec.Provisioner, pvHandlesList) + if err != nil { + logger.Error(err, "failed to create/update volumeGroupReplicationContent resource", "VGRContentName", vgrContentObj.Name) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + + // Update the VGR with VGRContentName, if empty + if instance.Spec.VolumeGroupReplicationContentName == "" { + instance.Spec.VolumeGroupReplicationContentName = vgrContentObj.Name + err = r.Client.Update(ctx, 
instance) + if err != nil { + logger.Error(err, "failed to update volumeGroupReplication instance", "VGRName", instance.Name) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + } + + // Since, the grouping may take few seconds to happen, just exit and wait for the reconcile + // to be triggered when the group handle is updated in the vgrcontent resource. + if vgrContentObj.Spec.VolumeGroupReplicationHandle == "" { + logger.Info("Either volumegroupreplicationcontent is not yet created or it is still grouping the volumes to be replicated") + return reconcile.Result{}, nil + } else { + // Create/Update VolumeReplication CR + err = r.createOrUpdateVolumeReplicationCR(instance, vrObj) + if err != nil { + logger.Error(err, "failed to create/update volumeReplication resource", "VRName", vrObj.Name) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + + // Update the VGR with VolumeReplication resource name, if not present + if instance.Spec.VolumeReplicationName == "" { + instance.Spec.VolumeReplicationName = vrObj.Name + err = r.Client.Update(ctx, instance) + if err != nil { + logger.Error(err, "failed to update volumeGroupReplication instance", "VGRName", instance.Name) + _ = r.setGroupReplicationFailure(instance, logger, err) + return reconcile.Result{}, err + } + } + } + } else { + // When the VGR resource is being deleted + // Remove the owner annotation and the finalizer from pvcs that exist in VGR resource's status + if instance.Status.PersistentVolumeClaimsRefList != nil { + for _, pvcRef := range instance.Status.PersistentVolumeClaimsRefList { + pvc := &corev1.PersistentVolumeClaim{} + err = r.Client.Get(ctx, types.NamespacedName{Name: pvcRef.Name, Namespace: req.Namespace}, pvc) + if err != nil { + logger.Error(err, "failed to fetch pvc from VGR status") + return reconcile.Result{}, err + } + + if err = RemoveOwnerFromPVCAnnotation(r.Client, logger, pvc, 
replicationv1alpha1.VolumeGroupReplicationNameAnnotation); err != nil { + logger.Error(err, "Failed to remove VolumeReplication annotation from PersistentVolumeClaim") + + return reconcile.Result{}, err + } + + if err = RemoveFinalizerFromPVC(r.Client, logger, pvc, vgrReplicationFinalizer); err != nil { + logger.Error(err, "Failed to remove VGR finalizer from PersistentVolumeClaim") + return reconcile.Result{}, err + } + } + } + // If dependent VR was created, delete it + if instance.Spec.VolumeReplicationName != "" { + req := types.NamespacedName{Name: instance.Spec.VolumeReplicationName, Namespace: req.Namespace} + err = r.Client.Get(ctx, req, vrObj) + if err != nil { + if errors.IsNotFound(err) { + logger.Info("volumeReplication resource not found") + } else { + logger.Error(err, "failed to fetch dependent volumeReplication resource") + return reconcile.Result{}, err + } + } else { + err = r.Client.Delete(ctx, vrObj) + if err != nil { + logger.Error(err, "failed to delete dependent volumeReplication resource") + return reconcile.Result{}, err + } + } + } + + // If dependent VGRContent was created, delete it + if instance.Spec.VolumeGroupReplicationContentName != "" { + req := types.NamespacedName{Name: instance.Spec.VolumeGroupReplicationContentName} + err = r.Client.Get(ctx, req, vgrContentObj) + if err != nil { + if errors.IsNotFound(err) { + logger.Info("volumeGroupReplicationContent resource not found") + } else { + logger.Error(err, "failed to fetch dependent volumeGroupReplicationContent resource") + return reconcile.Result{}, err + } + } else { + err = r.Client.Delete(ctx, vgrContentObj) + if err != nil { + logger.Error(err, "failed to delete dependent volumeGroupReplicationContent resource") + return reconcile.Result{}, err + } + } + } + + // Just log error, and exit reconcile without error. The dependent resource will update the VGR + // to remove their names from the CR, that will trigger a reconcile. 
+ if err = RemoveFinalizerFromVGR(r.Client, logger, instance); err != nil { + logger.Error(err, "failed to remove VolumeGroupReplication finalizer") + return reconcile.Result{ + RequeueAfter: 10 * time.Second, + }, nil + } + + logger.Info("volumeGroupReplication object is terminated, skipping reconciliation") + return reconcile.Result{}, nil + } + + // Update VGR status based on VR Status + instance.Status.VolumeReplicationStatus = vrObj.Status + err = r.Client.Status().Update(ctx, instance) + if err != nil { + logger.Error(err, "failed to update volumeGroupReplication instance's status", "VGRName", instance.Name) + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *VolumeGroupReplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { + // Wait for the group CRDs to be present, i.e, VolumeGroupReplication, VolumeGroupReplicationClass and + // VolumeGroupReplicationContent + err := r.waitForGroupCrds() + if err != nil { + return err + } + + // Only reconcile for spec/status update events + skipUpdates := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } + + // Watch for only status updates of the VR resource + watchOnlyStatusUpdates := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + oldObj := e.ObjectOld.(*replicationv1alpha1.VolumeReplication) + newObj := e.ObjectNew.(*replicationv1alpha1.VolumeReplication) + return !reflect.DeepEqual(oldObj.Status, newObj.Status) + }, + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + } + + // Watch for only spec updates of the VGRContent resource + watchOnlySpecUpdates := predicate.Funcs{ + 
UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + oldObj := e.ObjectOld.(*replicationv1alpha1.VolumeGroupReplicationContent) + newObj := e.ObjectNew.(*replicationv1alpha1.VolumeGroupReplicationContent) + return !reflect.DeepEqual(oldObj.Spec, newObj.Spec) + }, + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + } + + // Enqueue the VGR reconcile with the VGR name,namespace based on the annotation of the VR and VGRContent CR + enqueueVGRRequest := handler.EnqueueRequestsFromMapFunc( + func(context context.Context, obj client.Object) []reconcile.Request { + // Get the VolumeGroupReplication name,namespace + var vgrName, vgrNamespace string + objAnnotations := obj.GetAnnotations() + for k, v := range objAnnotations { + if k == volumeGroupReplicationRef { + vgrName = strings.Split(v, "/")[0] + vgrNamespace = strings.Split(v, "/")[1] + break + } + } + + // Skip reconcile if the triggering resource is not a sub-resource of VGR + if vgrName == "" || vgrNamespace == "" { + return []reconcile.Request{} + } + + // Return a reconcile request with the name, namespace of the VolumeGroupReplication resource + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: vgrNamespace, + Name: vgrName, + }, + }, + } + }, + ) + + // Enqueue the VGR reconcile with the VGR name,namespace based on the labels of the VGR CR + enqueueVGRForPVCRequest := handler.EnqueueRequestsFromMapFunc( + func(context context.Context, obj client.Object) []reconcile.Request { + // Check if the PVC has any labels defined + objLabels := obj.GetLabels() + if len(objLabels) == 0 { + return []reconcile.Request{} + } + + // Check if the resource is present in the cluster + vgrObjsList := &replicationv1alpha1.VolumeGroupReplicationList{} + logger := log.FromContext(context) + err := r.Client.List(context, vgrObjsList) + if err != nil { + logger.Error(err, "failed to list VolumeGroupReplication instances") + 
return []reconcile.Request{} + } + + // Check if the pvc labels match any VGRs based on selectors present in it's annotation + for _, vgr := range vgrObjsList.Items { + if vgr.Annotations != nil && vgr.Annotations["pvcSelector"] != "" { + labelSelector, err := labels.Parse(vgr.Annotations["pvcSelector"]) + if err != nil { + logger.Error(err, "failed to parse selector from VolumeGroupReplication's annotation") + return []reconcile.Request{} + } + objLabelsSet := labels.Set(objLabels) + if labelSelector.Matches(objLabelsSet) { + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: vgr.Namespace, + Name: vgr.Name, + }, + }, + } + } + } + } + + return []reconcile.Request{} + }, + ) + return ctrl.NewControllerManagedBy(mgr). - For(&replicationv1alpha1.VolumeGroupReplication{}). + For(&replicationv1alpha1.VolumeGroupReplication{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&replicationv1alpha1.VolumeGroupReplicationContent{}, builder.WithPredicates(skipUpdates)). + Owns(&replicationv1alpha1.VolumeReplication{}, builder.WithPredicates(skipUpdates)). + Watches(&replicationv1alpha1.VolumeGroupReplicationContent{}, enqueueVGRRequest, builder.WithPredicates(watchOnlySpecUpdates)). + Watches(&replicationv1alpha1.VolumeReplication{}, enqueueVGRRequest, builder.WithPredicates(watchOnlyStatusUpdates)). + Watches(&corev1.PersistentVolumeClaim{}, enqueueVGRForPVCRequest, builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
Complete(r) } + +// waitForGroupCrds waits for dependent CRDs to be available in the cluster +func (r *VolumeGroupReplicationReconciler) waitForGroupCrds() error { + logger := log.FromContext(context.TODO(), "Name", "checkingGroupDependencies") + + err := WaitForVolumeReplicationResource(r.Client, logger, volumeGroupReplicationClass) + if err != nil { + logger.Error(err, "failed to wait for VolumeGroupReplicationClass CRD") + return err + } + + err = WaitForVolumeReplicationResource(r.Client, logger, volumeGroupReplication) + if err != nil { + logger.Error(err, "failed to wait for VolumeGroupReplication CRD") + return err + } + + err = WaitForVolumeReplicationResource(r.Client, logger, volumeGroupReplicationContent) + if err != nil { + logger.Error(err, "failed to wait for VolumeGroupReplicationContent CRD") + return err + } + + return nil +} + +// setGroupReplicationFailure sets the failure replication status on the VolumeGroupReplication resource +func (r *VolumeGroupReplicationReconciler) setGroupReplicationFailure( + instance *replicationv1alpha1.VolumeGroupReplication, + logger logr.Logger, err error) error { + + instance.Status.State = GetCurrentReplicationState(instance.Status.State) + instance.Status.Message = replication.GetMessageFromError(err) + instance.Status.ObservedGeneration = instance.Generation + if err := r.Client.Status().Update(context.TODO(), instance); err != nil { + logger.Error(err, "failed to update volumeGroupReplication status", "VGRName", instance.Name) + return err + } + + return nil +} + +// getMatchingPVCsFromSource fetches the PVCs based on the selectors defined in the VolumeGroupReplication resource +func (r *VolumeGroupReplicationReconciler) getMatchingPVCsFromSource(instance *replicationv1alpha1.VolumeGroupReplication, + logger logr.Logger, + vgrClass *replicationv1alpha1.VolumeGroupReplicationClass) (corev1.PersistentVolumeClaimList, []string, string, error) { + + pvcList := corev1.PersistentVolumeClaimList{} + newSelector := 
labels.NewSelector() + + if instance.Spec.Source.Selector.MatchLabels != nil { + for key, value := range instance.Spec.Source.Selector.MatchLabels { + req, err := labels.NewRequirement(key, selection.Equals, []string{value}) + if err != nil { + logger.Error(err, "failed to add label selector requirement") + return pvcList, nil, "", err + } + newSelector = newSelector.Add(*req) + } + } + + if instance.Spec.Source.Selector.MatchExpressions != nil { + for _, labelExp := range instance.Spec.Source.Selector.MatchExpressions { + req, err := labels.NewRequirement(labelExp.Key, selection.Operator(labelExp.Operator), labelExp.Values) + if err != nil { + logger.Error(err, "failed to add label selector requirement") + return pvcList, nil, "", err + } + newSelector = newSelector.Add(*req) + } + } + opts := []client.ListOption{ + client.MatchingLabelsSelector{Selector: newSelector}, + client.InNamespace(instance.Namespace), + } + err := r.Client.List(context.TODO(), &pvcList, opts...) + if err != nil { + logger.Error(err, "failed to list pvcs with the given selectors") + return pvcList, nil, "", err + } + + pvHandlesList := []string{} + for _, pvc := range pvcList.Items { + pvName := pvc.Spec.VolumeName + pv := &corev1.PersistentVolume{} + err := r.Client.Get(context.TODO(), types.NamespacedName{Name: pvName}, pv) + if err != nil { + logger.Error(err, "failed to get pv for corresponding pvc", "PVC Name", pvc.Name) + return pvcList, nil, "", err + } + if pv.Spec.CSI == nil { + err = fmt.Errorf("pvc %s is not bound to a CSI PV", pvc.Name) + return pvcList, nil, "", err + } + if pv.Spec.CSI.Driver != vgrClass.Spec.Provisioner { + err = fmt.Errorf("driver of PV for PVC %s is different than the VolumeGroupReplicationClass driver", pvc.Name) + return pvcList, nil, "", err + } + pvHandlesList = append(pvHandlesList, pv.Spec.CSI.VolumeHandle) + } + + // Sort the pvHandles list and then pass, because reflect.DeepEqual checks for positional equality + slices.Sort(pvHandlesList) + + 
return pvcList, pvHandlesList, newSelector.String(), nil +} + +func (r *VolumeGroupReplicationReconciler) createOrUpdateVolumeGroupReplicationContentCR(vgr *replicationv1alpha1.VolumeGroupReplication, + vgrContentObj *replicationv1alpha1.VolumeGroupReplicationContent, vgrClass string, pvHandlesList []string) error { + vgrRef := fmt.Sprintf("%s/%s", vgr.Name, vgr.Namespace) + _, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, vgrContentObj, func() error { + if vgr.Spec.VolumeGroupReplicationContentName != "" && vgrContentObj.CreationTimestamp.IsZero() { + return fmt.Errorf("dependent VGRContent resource is not yet created, waiting for it to be created") + } + if vgrContentObj.CreationTimestamp.IsZero() { + vgrContentObj.Annotations = map[string]string{ + volumeGroupReplicationRef: vgrRef, + } + vgrContentObj.Spec = replicationv1alpha1.VolumeGroupReplicationContentSpec{ + VolumeGroupReplicationRef: &corev1.ObjectReference{ + APIVersion: vgr.APIVersion, + Kind: vgr.Kind, + Name: vgr.Name, + Namespace: vgr.Namespace, + UID: vgr.UID, + }, + Provisioner: vgrClass, + VolumeGroupReplicationClassName: vgr.Spec.VolumeGroupReplicationClassName, + Source: replicationv1alpha1.VolumeGroupReplicationContentSource{ + VolumeHandles: pvHandlesList, + }, + } + + return nil + } + + if vgrContentObj.Annotations[volumeGroupReplicationRef] != vgrRef { + vgrContentObj.Annotations[volumeGroupReplicationRef] = vgrRef + } + + if vgrContentObj.Spec.VolumeGroupReplicationRef == nil { + vgrContentObj.Spec.VolumeGroupReplicationRef = &corev1.ObjectReference{ + APIVersion: vgr.APIVersion, + Kind: vgr.Kind, + Name: vgr.Name, + Namespace: vgr.Namespace, + UID: vgr.UID, + } + } + + vgrContentObj.Spec.Source = replicationv1alpha1.VolumeGroupReplicationContentSource{ + VolumeHandles: pvHandlesList, + } + + return nil + }) + + return err +} + +func (r *VolumeGroupReplicationReconciler) createOrUpdateVolumeReplicationCR(vgr *replicationv1alpha1.VolumeGroupReplication, + vrObj 
*replicationv1alpha1.VolumeReplication) error { + apiGroup := "replication.storage.openshift.io" + _, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, vrObj, func() error { + if vrObj.CreationTimestamp.IsZero() { + vrObj.Annotations = map[string]string{ + volumeGroupReplicationRef: fmt.Sprintf("%s/%s", vgr.Name, vgr.Namespace), + } + vrObj.Spec = replicationv1alpha1.VolumeReplicationSpec{ + VolumeReplicationClass: vgr.Spec.VolumeReplicationClassName, + DataSource: corev1.TypedLocalObjectReference{ + APIGroup: &apiGroup, + Kind: vgr.Kind, + Name: vgr.Name, + }, + } + } + + vrObj.Spec.AutoResync = vgr.Spec.AutoResync + vrObj.Spec.ReplicationState = vgr.Spec.ReplicationState + + return controllerutil.SetOwnerReference(vgr, vrObj, r.Scheme) + }) + + return err +} diff --git a/internal/controller/replication.storage/volumegroupreplication_test.go b/internal/controller/replication.storage/volumegroupreplication_test.go new file mode 100644 index 000000000..3681864dc --- /dev/null +++ b/internal/controller/replication.storage/volumegroupreplication_test.go @@ -0,0 +1,199 @@ +/* +Copyright 2024 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" +) + +const ( + mockPV = "test-vgr-pv" + mockPVC = "test-vgr-pvc" +) + +var mockVolumeGroupReplicationObj = &replicationv1alpha1.VolumeGroupReplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-group-replication", + Namespace: mockNamespace, + UID: "testname", + }, + Spec: replicationv1alpha1.VolumeGroupReplicationSpec{ + VolumeGroupReplicationClassName: "volume-group-replication-class", + VolumeReplicationClassName: "volume-replication-class", + Source: replicationv1alpha1.VolumeGroupReplicationSource{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "vgr_test", + }, + }, + }, + }, +} + +var mockVGRPersistentVolume = &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: mockPV, + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "test-driver", + VolumeHandle: mockVolumeHandle, + }, + }, + }, +} + +var mockVGRPersistentVolumeClaim = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: mockPVC, + Namespace: mockNamespace, + Labels: map[string]string{ + "test": "vgr_test", + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: mockPV, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, +} + +func createFakeVolumeGroupReplicationReconciler(t *testing.T, obj ...runtime.Object) VolumeGroupReplicationReconciler { + t.Helper() + scheme := createFakeScheme(t) + vgrInit := 
&replicationv1alpha1.VolumeGroupReplication{} + vgrContentInit := &replicationv1alpha1.VolumeGroupReplicationContent{} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).WithStatusSubresource(vgrInit, vgrContentInit).Build() + + return VolumeGroupReplicationReconciler{ + Client: client, + Scheme: scheme, + } +} + +func TestVolumeGroupReplication(t *testing.T) { + t.Parallel() + testcases := []struct { + name string + pv *corev1.PersistentVolume + pvc *corev1.PersistentVolumeClaim + expectedPVCList []string + pvcFound bool + }{ + { + name: "case 1: matching pvc available", + pv: mockVGRPersistentVolume, + pvc: mockVGRPersistentVolumeClaim, + expectedPVCList: []string{mockPVC}, + pvcFound: true, + }, + { + name: "case 2: matching pvc not found", + pv: mockVGRPersistentVolume, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: mockPVC, + Namespace: mockNamespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: mockPV, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + }, + expectedPVCList: []string{}, + pvcFound: false, + }, + } + for _, tc := range testcases { + volumeGroupReplication := &replicationv1alpha1.VolumeGroupReplication{} + mockVolumeGroupReplicationObj.DeepCopyInto(volumeGroupReplication) + + volumeGroupReplicationClass := &replicationv1alpha1.VolumeGroupReplicationClass{} + mockVolumeGroupReplicationClassObj.DeepCopyInto(volumeGroupReplicationClass) + + volumeReplicationClass := &replicationv1alpha1.VolumeReplicationClass{} + mockVolumeReplicationClassObj.DeepCopyInto(volumeReplicationClass) + + testPV := &corev1.PersistentVolume{} + tc.pv.DeepCopyInto(testPV) + + testPVC := &corev1.PersistentVolumeClaim{} + tc.pvc.DeepCopyInto(testPVC) + + r := createFakeVolumeGroupReplicationReconciler(t, testPV, testPVC, volumeReplicationClass, volumeGroupReplicationClass, volumeGroupReplication) + nsKey := types.NamespacedName{ + Namespace: 
volumeGroupReplication.Namespace, + Name: volumeGroupReplication.Name, + } + req := reconcile.Request{ + NamespacedName: nsKey, + } + res, err := r.Reconcile(context.TODO(), req) + + if tc.pvcFound { + // Check reconcile didn't return any error + assert.Equal(t, reconcile.Result{}, res) + assert.NoError(t, err) + + pvc := &corev1.PersistentVolumeClaim{} + err = r.Client.Get(context.TODO(), types.NamespacedName{Name: testPVC.Name, Namespace: testPVC.Namespace}, pvc) + assert.NoError(t, err) + + vgr := &replicationv1alpha1.VolumeGroupReplication{} + err = r.Client.Get(context.TODO(), nsKey, vgr) + assert.NoError(t, err) + + vgrPVCRefList := vgr.Status.PersistentVolumeClaimsRefList + assert.Equal(t, 1, len(vgrPVCRefList)) + for _, pvc := range vgrPVCRefList { + assert.Equal(t, pvc.Name, mockVGRPersistentVolumeClaim.Name) + } + // Check PVC annotation + expectedOwner := fmt.Sprintf("%s/%s", volumeGroupReplication.Name, volumeGroupReplication.Namespace) + assert.Equal(t, expectedOwner, pvc.ObjectMeta.Annotations[replicationv1alpha1.VolumeGroupReplicationNameAnnotation]) + // Check VGRContent Created + assert.NotEmpty(t, vgr.Spec.VolumeGroupReplicationContentName) + } else { + // Check reconcile returned an error + assert.Equal(t, reconcile.Result{}, res) + assert.Error(t, err) + + vgr := &replicationv1alpha1.VolumeGroupReplication{} + err = r.Client.Get(context.TODO(), nsKey, vgr) + assert.NoError(t, err) + + assert.Empty(t, vgr.Status.PersistentVolumeClaimsRefList) + assert.Empty(t, vgr.Spec.VolumeGroupReplicationContentName) + } + } +} diff --git a/internal/controller/replication.storage/volumegroupreplicationclass.go b/internal/controller/replication.storage/volumegroupreplicationclass.go new file mode 100644 index 000000000..209646098 --- /dev/null +++ b/internal/controller/replication.storage/volumegroupreplicationclass.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Kubernetes-CSI-Addons Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +// getVolumeGroupReplicationClass fetches the cluster-scoped VolumeGroupReplicationClass object by name and returns it. +func (r VolumeGroupReplicationReconciler) getVolumeGroupReplicationClass(logger logr.Logger, vgrClassName string) (*replicationv1alpha1.VolumeGroupReplicationClass, error) { + vgrClassObj := &replicationv1alpha1.VolumeGroupReplicationClass{} + err := r.Client.Get(context.TODO(), types.NamespacedName{Name: vgrClassName}, vgrClassObj) + if err != nil { + if errors.IsNotFound(err) { + logger.Error(err, "VolumeGroupReplicationClass not found", "VolumeGroupReplicationClass", vgrClassName) + } else { + logger.Error(err, "Got an unexpected error while fetching VolumeReplicationClass", "VolumeReplicationClass", vgrClassName) + } + + return nil, err + } + + return vgrClassObj, nil +} diff --git a/internal/controller/replication.storage/volumegroupreplicationclass_test.go b/internal/controller/replication.storage/volumegroupreplicationclass_test.go new file mode 100644 index 000000000..40ed15872 --- /dev/null +++ b/internal/controller/replication.storage/volumegroupreplicationclass_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Kubernetes-CSI-Addons Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "testing" + + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var mockVolumeGroupReplicationClassObj = &replicationv1alpha1.VolumeGroupReplicationClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-group-replication-class", + }, + Spec: replicationv1alpha1.VolumeGroupReplicationClassSpec{ + Provisioner: "test-driver", + }, +} + +func TestGetVolumeGroupReplicationClass(t *testing.T) { + t.Parallel() + testcases := []struct { + createVgrc bool + errorExpected bool + isErrorNotFound bool + }{ + {createVgrc: true, errorExpected: false, isErrorNotFound: false}, + {createVgrc: false, errorExpected: true, isErrorNotFound: true}, + } + + for _, tc := range testcases { + var objects []runtime.Object + + volumeGroupReplication := &replicationv1alpha1.VolumeGroupReplication{} + mockVolumeGroupReplicationObj.DeepCopyInto(volumeGroupReplication) + objects = append(objects, volumeGroupReplication) + + if tc.createVgrc { + volumeGroupReplicationClass := &replicationv1alpha1.VolumeGroupReplicationClass{} + mockVolumeGroupReplicationClassObj.DeepCopyInto(volumeGroupReplicationClass) + objects = append(objects, volumeGroupReplicationClass) + } + 
+ reconciler := createFakeVolumeGroupReplicationReconciler(t, objects...) + vgrClassObj, err := reconciler.getVolumeGroupReplicationClass(log.FromContext(context.TODO()), mockVolumeGroupReplicationClassObj.Name) + + if tc.errorExpected { + assert.Error(t, err) + if tc.isErrorNotFound { + assert.True(t, errors.IsNotFound(err)) + } + } else { + assert.NoError(t, err) + assert.NotEqual(t, nil, vgrClassObj) + } + } +} diff --git a/internal/controller/replication.storage/volumegroupreplicationcontent_controller.go b/internal/controller/replication.storage/volumegroupreplicationcontent_controller.go index eccfdd30e..d0298afef 100644 --- a/internal/controller/replication.storage/volumegroupreplicationcontent_controller.go +++ b/internal/controller/replication.storage/volumegroupreplicationcontent_controller.go @@ -18,36 +18,298 @@ package controller import ( "context" + "fmt" + "slices" + "time" + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + grpcClient "github.com/csi-addons/kubernetes-csi-addons/internal/client" + conn "github.com/csi-addons/kubernetes-csi-addons/internal/connection" + "github.com/go-logr/logr" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/csi-addons/spec/lib/go/identity" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - - replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // VolumeGroupReplicationContentReconciler reconciles a VolumeGroupReplicationContent object type VolumeGroupReplicationContentReconciler struct { client.Client Scheme *runtime.Scheme + // ConnectionPool consists of map of Connection objects + Connpool *conn.ConnectionPool + // Timeout for 
the Reconcile operation. + Timeout time.Duration } //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents/status,verbs=get;update;patch //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents/finalizers,verbs=update +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationclasses,verbs=get;list;watch + +/* +Steps performed by the reconcile loop: +- Watch for VGRContent CR +- Fetch the VGRClass using the name from the VGRContent CR, and extract the secrets from it +- Add VGRContent owner annotation to the VGR resource +- Add finalizer to the VGRContent resource +- Create/Modify the volume group based on the handle field +- Update the group handle in VGRContent CR +- Update the VGRContent status with the PV list +*/ // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
func (r *VolumeGroupReplicationContentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + logger := log.FromContext(ctx, "Request.Name", req.Name, "Request.Namespace", req.Namespace) + + // Fetch VolumeGroupReplicationContent instance + instance := &replicationv1alpha1.VolumeGroupReplicationContent{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + logger.Info("volumeGroupReplicationContent resource not found") + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + volumeGroupClient, err := r.getVolumeGroupClient(ctx, instance.Spec.Provisioner) + if err != nil { + logger.Error(err, "Failed to get VolumeGroupClient") + return reconcile.Result{}, err + } + + // Fetch VolumeGroupReplicationClass + vgrClass := &replicationv1alpha1.VolumeGroupReplicationClass{} + err = r.Client.Get(ctx, types.NamespacedName{Name: instance.Spec.VolumeGroupReplicationClassName}, vgrClass) + if err != nil { + logger.Error(err, "failed to fetch volumeGroupReplicationClass resource") + return reconcile.Result{}, err + } + + // Get secrets and parameters + parameters := filterPrefixedParameters(replicationParameterPrefix, vgrClass.Spec.Parameters) + secretName := vgrClass.Spec.Parameters[prefixedGroupReplicationSecretNameKey] + secretNamespace := vgrClass.Spec.Parameters[prefixedGroupReplicationSecretNamespaceKey] + + // Get the VolumeGroupReplication resource + if instance.Spec.VolumeGroupReplicationRef == nil { + logger.Info("waiting for owner VolumeGroupReplication to update the owner ref") + return reconcile.Result{}, nil + } + vgrObj := &replicationv1alpha1.VolumeGroupReplication{} + namespacedObj := types.NamespacedName{Namespace: instance.Spec.VolumeGroupReplicationRef.Namespace, + Name: instance.Spec.VolumeGroupReplicationRef.Name} + err = r.Client.Get(ctx, namespacedObj, vgrObj) + if err != nil { + logger.Error(err, "failed to get owner 
VolumeGroupReplication")
+		return reconcile.Result{}, err
+	}
+
+	// Check if object is being deleted
+	if instance.GetDeletionTimestamp().IsZero() {
+		// add vgr as the owner annotation for vgrContent
+		err = r.annotateVolumeGroupReplicationWithVGRContentOwner(ctx, logger, req.Name, vgrObj)
+		if err != nil {
+			logger.Error(err, "Failed to annotate VolumeGroupReplication owner")
+			return ctrl.Result{}, err
+		}
+		// add vgr finalizer to vgrContent
+		if err = AddFinalizerToVGRContent(r.Client, logger, instance, vgrReplicationFinalizer); err != nil {
+			logger.Error(err, "failed to add VolumeGroupReplicationContent finalizer")
+			return reconcile.Result{}, err
+		}
+	} else {
+		// Check if the owner VGR is marked for deletion, only then remove the finalizer from VGRContent resource
+		if vgrObj.GetDeletionTimestamp().IsZero() {
+			logger.Info("cannot delete VolumeGroupReplicationContent resource, until owner VolumeGroupReplication instance is deleted")
+			return reconcile.Result{}, nil
+		} else if !slices.Contains(instance.ObjectMeta.Finalizers, volumeReplicationFinalizer) {
+			// Delete the volume group, if it exists
+			if instance.Spec.VolumeGroupReplicationHandle != "" {
+				// Fetch the volume group using the group id
+				tmpVG, err := volumeGroupClient.ControllerGetVolumeGroup(instance.Spec.VolumeGroupReplicationHandle, secretName, secretNamespace)
+				if err != nil {
+					if status.Code(err) == codes.NotFound {
+						logger.Info("failed to get volume group, the volume group has already been deleted or doesn't exist")
+					} else {
+						logger.Error(err, "failed to get the volume group to be deleted", "Volume Group", instance.Spec.VolumeGroupReplicationHandle)
+						return reconcile.Result{}, err
+					}
+				} else {
+					// Empty the volume group before deleting the volume group, if not empty
+					if len(tmpVG.VolumeGroup.GetVolumeIds()) != 0 {
+						_, err := volumeGroupClient.ModifyVolumeGroupMembership(tmpVG.VolumeGroup.GetVolumeGroupId(), []string{}, secretName, secretNamespace)
+						if err != nil {
+
logger.Error(err, "failed to delete volumes from volume group, before deleting volume group")
+							return reconcile.Result{}, err
+						}
+					}
+					_, err := volumeGroupClient.DeleteVolumeGroup(tmpVG.VolumeGroup.GetVolumeGroupId(), secretName, secretNamespace)
+					if err != nil {
+						logger.Error(err, "failed to delete volume group", "Volume Group", tmpVG.VolumeGroup.GetVolumeGroupId())
+						return reconcile.Result{}, err
+					}
+				}
+			}
+
+			// Remove the vgrcontent owner annotation from the VGR resource
+			if err = r.removeVGRContentOwnerFromVGRAnnotation(ctx, logger, vgrObj); err != nil {
+				logger.Error(err, "Failed to remove VolumeGroupReplicationContent owner annotation from VolumeGroupReplication")
+				return reconcile.Result{}, err
+			}
+
+			// Remove the vgr finalizer from the vgrcontent resource
+			if err = RemoveFinalizerFromVGRContent(r.Client, logger, instance, vgrReplicationFinalizer); err != nil {
+				logger.Error(err, "failed to remove finalizer from VolumeGroupReplicationContent resource")
+				return reconcile.Result{}, err
+			}
+
+			logger.Info("volumeGroupReplicationContent object is terminated, skipping reconciliation")
+			return reconcile.Result{}, nil
+		} else {
+			logger.Info("cannot delete VolumeGroupReplicationContent resource, until dependent VolumeReplication instance is deleted")
+			return reconcile.Result{}, nil
+		}
+	}
+
+	// Create/Update volume group
+	if instance.Spec.VolumeGroupReplicationHandle == "" {
+		groupName := instance.Name
+		resp, err := volumeGroupClient.CreateVolumeGroup(groupName, instance.Spec.Source.VolumeHandles, secretName, secretNamespace, parameters)
+		if err != nil {
+			logger.Error(err, "failed to group volumes")
+			return reconcile.Result{}, err
+		}
+
+		// Update the group handle in the VolumeGroupReplicationContent CR
+		instance.Spec.VolumeGroupReplicationHandle = resp.GetVolumeGroup().VolumeGroupId
+		err = r.Client.Update(ctx, instance)
+		if err != nil {
+			logger.Error(err, "failed to update group id in VGRContent")
+			return reconcile.Result{}, err
+		}
+	} else {
+
groupID := instance.Spec.VolumeGroupReplicationHandle + _, err := volumeGroupClient.ModifyVolumeGroupMembership(groupID, instance.Spec.Source.VolumeHandles, secretName, secretNamespace) + if err != nil { + logger.Error(err, "failed to modify volume group") + return reconcile.Result{}, err + } + } + + // Update VGRContent resource status + pvList := &corev1.PersistentVolumeList{} + err = r.Client.List(ctx, pvList) + if err != nil { + logger.Error(err, "failed to list PVs") + return reconcile.Result{}, err + } - return ctrl.Result{}, nil + pvRefList := []corev1.LocalObjectReference{} + for _, pv := range pvList.Items { + if slices.ContainsFunc(instance.Spec.Source.VolumeHandles, func(handle string) bool { + return pv.Spec.CSI != nil && pv.Spec.CSI.VolumeHandle == handle + }) { + pvRefList = append(pvRefList, corev1.LocalObjectReference{ + Name: pv.Name, + }) + } + } + instance.Status.PersistentVolumeRefList = pvRefList + err = r.Client.Status().Update(ctx, instance) + if err != nil { + logger.Error(err, "failed to update VGRContent status") + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *VolumeGroupReplicationContentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). For(&replicationv1alpha1.VolumeGroupReplicationContent{}). Complete(r) } + +func (r *VolumeGroupReplicationContentReconciler) getVolumeGroupClient(ctx context.Context, driverName string) (grpcClient.VolumeGroup, error) { + conn, err := r.Connpool.GetLeaderByDriver(ctx, r.Client, driverName) + if err != nil { + return nil, fmt.Errorf("no leader for the ControllerService of driver %q", driverName) + } + + for _, cap := range conn.Capabilities { + // validate if VOLUME_GROUP capability is supported by the driver. + if cap.GetVolumeGroup() == nil { + continue + } + + // validate of VOLUME_GROUP capability is enabled by the storage driver. 
+ if cap.GetVolumeGroup().GetType() == identity.Capability_VolumeGroup_VOLUME_GROUP { + return grpcClient.NewVolumeGroupClient(conn.Client, r.Timeout), nil + } + } + + return nil, fmt.Errorf("leading CSIAddonsNode %q for driver %q does not support VolumeGroup", conn.Name, driverName) + +} + +// annotateVolumeGroupReplicationWithVGRContentOwner will add the VolumeGroupReplicationContent owner to the VGR annotations. +func (r *VolumeGroupReplicationContentReconciler) annotateVolumeGroupReplicationWithVGRContentOwner(ctx context.Context, logger logr.Logger, reqOwnerName string, vgr *replicationv1alpha1.VolumeGroupReplication) error { + if vgr.ObjectMeta.Annotations == nil { + vgr.ObjectMeta.Annotations = map[string]string{} + } + + currentOwnerName := vgr.ObjectMeta.Annotations[replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation] + if currentOwnerName == "" { + logger.Info("setting vgrcontent owner on VGR annotation", "Name", vgr.Name, "owner", reqOwnerName) + vgr.ObjectMeta.Annotations[replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation] = reqOwnerName + err := r.Update(ctx, vgr) + if err != nil { + logger.Error(err, "Failed to update VGR annotation", "Name", vgr.Name) + + return fmt.Errorf("failed to update VGR %q annotation for VolumeGroupReplicationContent: %w", + vgr.Name, err) + } + + return nil + } + + if currentOwnerName != reqOwnerName { + logger.Info("cannot change the owner of vgr", + "VGR name", vgr.Name, + "current owner", currentOwnerName, + "requested owner", reqOwnerName) + + return fmt.Errorf("VGRContent %q not owned by correct VolumeGroupReplication %q", + reqOwnerName, vgr.Name) + } + + return nil +} + +// removeVGRContentOwnerFromVGRAnnotation removes the VolumeGroupReplicationContent owner from the VGR annotations. 
+func (r *VolumeGroupReplicationContentReconciler) removeVGRContentOwnerFromVGRAnnotation(ctx context.Context, logger logr.Logger, vgr *replicationv1alpha1.VolumeGroupReplication) error { + if _, ok := vgr.ObjectMeta.Annotations[replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation]; ok { + logger.Info("removing vgrcontent owner annotation from VolumeGroupReplication object", "Annotation", replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation) + delete(vgr.ObjectMeta.Annotations, replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation) + if err := r.Client.Update(ctx, vgr); err != nil { + return fmt.Errorf("failed to remove annotation %q from VolumeGroupReplication "+ + "%q %w", + replicationv1alpha1.VolumeGroupReplicationContentNameAnnotation, vgr.Name, err) + } + } + + return nil +} diff --git a/internal/controller/replication.storage/volumereplication_controller.go b/internal/controller/replication.storage/volumereplication_controller.go index c96e43d9a..b131925a2 100644 --- a/internal/controller/replication.storage/volumereplication_controller.go +++ b/internal/controller/replication.storage/volumereplication_controller.go @@ -35,11 +35,8 @@ import ( "google.golang.org/grpc/codes" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -82,6 +79,7 @@ type VolumeReplicationReconciler struct { //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications,verbs=get;list;watch //+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplications/finalizers,verbs=update 
//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents,verbs=get;list;watch +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumegroupreplicationcontents/finalizers,verbs=update //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/finalizers,verbs=update //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch @@ -113,7 +111,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re vrcObj, err := r.getVolumeReplicationClass(logger, instance.Spec.VolumeReplicationClass) if err != nil { setFailureCondition(instance, "failed to get volumeReplication class", err.Error(), instance.Spec.DataSource.Kind) - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), err.Error()) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), err.Error()) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -125,7 +123,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if err != nil { logger.Error(err, "failed to validate parameters of volumeReplicationClass", "VRCName", instance.Spec.VolumeReplicationClass) setFailureCondition(instance, "failed to validate parameters of volumeReplicationClass", err.Error(), instance.Spec.DataSource.Kind) - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), err.Error()) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), err.Error()) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -161,7 +159,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if pvErr != nil { logger.Error(pvErr, "failed to get PVC", "PVCName", instance.Spec.DataSource.Name) 
setFailureCondition(instance, "failed to find PVC", pvErr.Error(), instance.Spec.DataSource.Name) - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), pvErr.Error()) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), pvErr.Error()) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -176,7 +174,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if vgrErr != nil { logger.Error(vgrErr, "failed to get VolumeGroupReplication", "VGRName", instance.Spec.DataSource.Name) setFailureCondition(instance, "failed to get VolumeGroupReplication", vgrErr.Error(), instance.Spec.DataSource.Name) - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), vgrErr.Error()) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), vgrErr.Error()) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -188,7 +186,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re err = fmt.Errorf("unsupported datasource kind") logger.Error(err, "given kind not supported", "Kind", instance.Spec.DataSource.Kind) setFailureCondition(instance, "unsupported datasource", err.Error(), "") - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), err.Error()) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), err.Error()) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -231,28 +229,31 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re } switch instance.Spec.DataSource.Kind { case pvcDataSource: - err = r.annotatePVCWithOwner(ctx, logger, req.Name, pvc) + reqOwner := 
fmt.Sprintf("%s/%s", instance.Name, instance.Namespace) + err = AnnotatePVCWithOwner(r.Client, logger, reqOwner, pvc, replicationv1alpha1.VolumeReplicationNameAnnotation) if err != nil { logger.Error(err, "Failed to annotate PVC owner") return ctrl.Result{}, err } - if err = r.addFinalizerToPVC(logger, pvc); err != nil { + if err = AddFinalizerToPVC(r.Client, logger, pvc, pvcReplicationFinalizer); err != nil { logger.Error(err, "Failed to add PersistentVolumeClaim finalizer") - - return reconcile.Result{}, err + return ctrl.Result{}, err } case volumeGroupReplicationDataSource: err = r.annotateVolumeGroupReplicationWithOwner(ctx, logger, req.Name, vgr) if err != nil { logger.Error(err, "Failed to annotate VolumeGroupReplication owner") - return ctrl.Result{}, err } - if err = r.addFinalizerToVGR(logger, vgr); err != nil { + if err = AddFinalizerToVGR(r.Client, logger, vgr); err != nil { logger.Error(err, "Failed to add VolumeGroupReplication finalizer") + return reconcile.Result{}, err + } + if err = AddFinalizerToVGRContent(r.Client, logger, vgrc, volumeReplicationFinalizer); err != nil { + logger.Error(err, "Failed to add VolumeReplication finalizer to VolumeGroupReplicationContent") return reconcile.Result{}, err } } @@ -266,27 +267,23 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re } switch instance.Spec.DataSource.Kind { case pvcDataSource: - if err = r.removeOwnerFromPVCAnnotation(ctx, logger, pvc); err != nil { + if err = RemoveOwnerFromPVCAnnotation(r.Client, logger, pvc, replicationv1alpha1.VolumeReplicationNameAnnotation); err != nil { logger.Error(err, "Failed to remove VolumeReplication annotation from PersistentVolumeClaim") - return reconcile.Result{}, err } - if err = r.removeFinalizerFromPVC(logger, pvc); err != nil { + if err = RemoveFinalizerFromPVC(r.Client, logger, pvc, pvcReplicationFinalizer); err != nil { logger.Error(err, "Failed to remove PersistentVolumeClaim finalizer") - return reconcile.Result{}, err 
} case volumeGroupReplicationDataSource: - if err = r.removeOwnerFromVGRAnnotation(ctx, logger, vgr); err != nil { - logger.Error(err, "Failed to remove VolumeReplication annotation from VolumeGroupReplication") - + if err = RemoveFinalizerFromVGRContent(r.Client, logger, vgrc, volumeReplicationFinalizer); err != nil { + logger.Error(err, "Failed to remove VolumeReplication finalizer from VolumeGroupReplicationContent") return reconcile.Result{}, err } - if err = r.removeFinalizerFromVGR(logger, vgr); err != nil { - logger.Error(err, "Failed to remove VolumeGroupReplication finalizer") - + if err = r.removeOwnerFromVGRAnnotation(ctx, logger, vgr); err != nil { + logger.Error(err, "Failed to remove VolumeReplication annotation from VolumeGroupReplication") return reconcile.Result{}, err } } @@ -324,7 +321,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if err = r.enableReplication(vr); err != nil { logger.Error(err, "failed to enable replication") msg := replication.GetMessageFromError(err) - uErr := r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), msg) + uErr := r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), msg) if uErr != nil { logger.Error(uErr, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -344,7 +341,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re logger.Info("volume is not ready to use") // set the status.State to secondary as the // instance.Status.State is primary for the first time. 
- err = r.updateReplicationStatus(instance, logger, getReplicationState(instance), "volume is marked secondary and is degraded") + err = r.updateReplicationStatus(instance, logger, GetReplicationState(instance.Spec.ReplicationState), "volume is marked secondary and is degraded") if err != nil { return ctrl.Result{}, err } @@ -372,7 +369,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re replicationErr = fmt.Errorf("unsupported volume state") logger.Error(replicationErr, "given volume state is not supported", "ReplicationState", instance.Spec.ReplicationState) setFailureCondition(instance, "unsupported volume state", replicationErr.Error(), instance.Spec.DataSource.Kind) - err = r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), replicationErr.Error()) + err = r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), replicationErr.Error()) if err != nil { logger.Error(err, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -383,7 +380,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if replicationErr != nil { msg := replication.GetMessageFromError(replicationErr) logger.Error(replicationErr, "failed to Replicate", "ReplicationState", instance.Spec.ReplicationState) - err = r.updateReplicationStatus(instance, logger, getCurrentReplicationState(instance), msg) + err = r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), msg) if err != nil { logger.Error(err, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -396,13 +393,13 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re }, nil } - return ctrl.Result{}, replicationErr + return reconcile.Result{}, replicationErr } if requeueForResync { logger.Info("volume is not ready to use, requeuing for resync") - err = r.updateReplicationStatus(instance, logger, 
getCurrentReplicationState(instance), "volume is degraded") + err = r.updateReplicationStatus(instance, logger, GetCurrentReplicationState(instance.Status.State), "volume is degraded") if err != nil { logger.Error(err, "failed to update volumeReplication status", "VRName", instance.Name) } @@ -455,7 +452,7 @@ func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Re if instance.Spec.ReplicationState == replicationv1alpha1.Secondary { instance.Status.LastSyncTime = nil } - err = r.updateReplicationStatus(instance, logger, getReplicationState(instance), msg) + err = r.updateReplicationStatus(instance, logger, GetReplicationState(instance.Spec.ReplicationState), msg) if err != nil { return ctrl.Result{}, err } @@ -554,7 +551,6 @@ func (r *VolumeReplicationReconciler) updateReplicationStatus( func (r *VolumeReplicationReconciler) SetupWithManager(mgr ctrl.Manager, ctrlOptions controller.Options) error { err := r.waitForCrds() if err != nil { - return err } @@ -570,46 +566,21 @@ func (r *VolumeReplicationReconciler) SetupWithManager(mgr ctrl.Manager, ctrlOpt func (r *VolumeReplicationReconciler) waitForCrds() error { logger := log.FromContext(context.TODO(), "Name", "checkingDependencies") - err := r.waitForVolumeReplicationResource(logger, volumeReplicationClass) + err := WaitForVolumeReplicationResource(r.Client, logger, volumeReplicationClass) if err != nil { logger.Error(err, "failed to wait for VolumeReplicationClass CRD") - return err } - err = r.waitForVolumeReplicationResource(logger, volumeReplication) + err = WaitForVolumeReplicationResource(r.Client, logger, volumeReplication) if err != nil { logger.Error(err, "failed to wait for VolumeReplication CRD") - return err } return nil } -func (r *VolumeReplicationReconciler) waitForVolumeReplicationResource(logger logr.Logger, resourceName string) error { - unstructuredResource := &unstructured.UnstructuredList{} - unstructuredResource.SetGroupVersionKind(schema.GroupVersionKind{ - Group: 
replicationv1alpha1.GroupVersion.Group, - Kind: resourceName, - Version: replicationv1alpha1.GroupVersion.Version, - }) - for { - err := r.Client.List(context.TODO(), unstructuredResource) - if err == nil { - return nil - } - // return errors other than NoMatch - if !meta.IsNoMatchError(err) { - logger.Error(err, "got an unexpected error while waiting for resource", "Resource", resourceName) - - return err - } - logger.Info("resource does not exist", "Resource", resourceName) - time.Sleep(5 * time.Second) - } -} - // volumeReplicationInstance contains the attributes // that can be useful in reconciling a particular // instance of the VolumeReplication resource. @@ -783,27 +754,6 @@ func (r *VolumeReplicationReconciler) getVolumeReplicationInfo(vr *volumeReplica return infoResponse, nil } -func getReplicationState(instance *replicationv1alpha1.VolumeReplication) replicationv1alpha1.State { - switch instance.Spec.ReplicationState { - case replicationv1alpha1.Primary: - return replicationv1alpha1.PrimaryState - case replicationv1alpha1.Secondary: - return replicationv1alpha1.SecondaryState - case replicationv1alpha1.Resync: - return replicationv1alpha1.SecondaryState - } - - return replicationv1alpha1.UnknownState -} - -func getCurrentReplicationState(instance *replicationv1alpha1.VolumeReplication) replicationv1alpha1.State { - if instance.Status.State == "" { - return replicationv1alpha1.UnknownState - } - - return instance.Status.State -} - func setFailureCondition(instance *replicationv1alpha1.VolumeReplication, errMessage string, errFromCephCSI string, dataSource string) { switch instance.Spec.ReplicationState { case replicationv1alpha1.Primary: From 60c4c9bf9fe7236456c537b5a43119db3b567b54 Mon Sep 17 00:00:00 2001 From: Nikhil-Ladha Date: Mon, 8 Jul 2024 17:46:08 +0530 Subject: [PATCH 2/3] config: add generated changes for crds, rbacs add generated changes for crds, rbacs Signed-off-by: Nikhil-Ladha --- .../v1alpha1/zz_generated.deepcopy.go | 6 +++- 
...ift.io_volumegroupreplicationcontents.yaml | 22 +++++------- ....openshift.io_volumegroupreplications.yaml | 7 ++-- config/rbac/role.yaml | 35 ++++++++++++++++--- deploy/controller/crds.yaml | 29 +++++++-------- deploy/controller/rbac.yaml | 35 ++++++++++++++++--- 6 files changed, 93 insertions(+), 41 deletions(-) diff --git a/api/replication.storage/v1alpha1/zz_generated.deepcopy.go b/api/replication.storage/v1alpha1/zz_generated.deepcopy.go index 565ae0f63..e97d46cc0 100644 --- a/api/replication.storage/v1alpha1/zz_generated.deepcopy.go +++ b/api/replication.storage/v1alpha1/zz_generated.deepcopy.go @@ -231,7 +231,11 @@ func (in *VolumeGroupReplicationContentSource) DeepCopy() *VolumeGroupReplicatio // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeGroupReplicationContentSpec) DeepCopyInto(out *VolumeGroupReplicationContentSpec) { *out = *in - out.VolumeGroupReplicationRef = in.VolumeGroupReplicationRef + if in.VolumeGroupReplicationRef != nil { + in, out := &in.VolumeGroupReplicationRef, &out.VolumeGroupReplicationRef + *out = new(corev1.ObjectReference) + **out = **in + } in.Source.DeepCopyInto(&out.Source) } diff --git a/config/crd/bases/replication.storage.openshift.io_volumegroupreplicationcontents.yaml b/config/crd/bases/replication.storage.openshift.io_volumegroupreplicationcontents.yaml index 425c651eb..e2cfd01a8 100644 --- a/config/crd/bases/replication.storage.openshift.io_volumegroupreplicationcontents.yaml +++ b/config/crd/bases/replication.storage.openshift.io_volumegroupreplicationcontents.yaml @@ -52,9 +52,8 @@ spec: type: string source: description: |- - Source specifies whether the snapshot is (or should be) dynamically provisioned + Source specifies whether the volume is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. - This field is immutable after creation. Required. 
properties: volumeHandles: @@ -71,6 +70,7 @@ spec: description: |- VolumeGroupReplicationClassName is the name of the VolumeGroupReplicationClass from which this group replication was (or will be) created. + Required. type: string volumeGroupReplicationHandle: description: |- @@ -83,10 +83,6 @@ spec: VolumeGroupReplicationContent object is bound. VolumeGroupReplication.Spec.VolumeGroupReplicationContentName field must reference to this VolumeGroupReplicationContent's name for the bidirectional binding to be valid. - For a pre-existing VolumeGroupReplicationContent object, name and namespace of the - VolumeGroupReplication object MUST be provided for binding to happen. - This field is immutable after creation. - Required. properties: apiVersion: description: API version of the referent. @@ -129,16 +125,16 @@ spec: type: object x-kubernetes-map-type: atomic x-kubernetes-validations: - - message: both volumeGroupReplicationRef.name and volumeGroupReplicationRef.namespace - must be set - rule: has(self.name) && has(self.__namespace__) - - message: volumeGroupReplicationRef is immutable - rule: self == oldSelf + - message: volumeGroupReplicationRef.name, volumeGroupReplicationRef.namespace + and volumeGroupReplicationRef.uid must be set if volumeGroupReplicationRef + is defined + rule: 'self != null ? has(self.name) && has(self.__namespace__) + && has(self.uid) : true' required: - provisioner - source + - volumeGroupReplicationClassName - volumeGroupReplicationHandle - - volumeGroupReplicationRef type: object status: description: VolumeGroupReplicationContentStatus defines the status of @@ -146,7 +142,7 @@ spec: properties: persistentVolumeRefList: description: |- - PersistentVolumeRefList is the list of of PV for the group replication + PersistentVolumeRefList is the list of PV for the group replication The maximum number of allowed PV in the group is 100. 
items: description: |- diff --git a/config/crd/bases/replication.storage.openshift.io_volumegroupreplications.yaml b/config/crd/bases/replication.storage.openshift.io_volumegroupreplications.yaml index 73cc47c9b..bd461e0ca 100644 --- a/config/crd/bases/replication.storage.openshift.io_volumegroupreplications.yaml +++ b/config/crd/bases/replication.storage.openshift.io_volumegroupreplications.yaml @@ -131,11 +131,12 @@ spec: - message: volumeGroupReplicationContentName is immutable rule: self == oldSelf volumeReplicationClassName: - description: volumeReplicationClassName is the volumeReplicationClass - name for VolumeReplication object + description: |- + volumeReplicationClassName is the volumeReplicationClass name for the VolumeReplication object + created for this volumeGroupReplication type: string x-kubernetes-validations: - - message: volumReplicationClassName is immutable + - message: volumeReplicationClassName is immutable rule: self == oldSelf volumeReplicationName: description: Name of the VolumeReplication object created for this diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e311fe0fc..99fb6aafe 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,7 +8,6 @@ rules: - "" resources: - namespaces - - persistentvolumes - pods verbs: - get @@ -30,6 +29,15 @@ rules: - persistentvolumeclaims/finalizers verbs: - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch - apiGroups: - coordination.k8s.io resources: @@ -82,11 +90,19 @@ rules: - get - patch - update +- apiGroups: + - replication.storage.openshift.io + resources: + - volumegroupreplicationclasses + - volumereplicationclasses + verbs: + - get + - list + - watch - apiGroups: - replication.storage.openshift.io resources: - volumegroupreplicationcontents - - volumegroupreplications verbs: - create - delete @@ -101,7 +117,6 @@ rules: - volumegroupreplicationcontents/finalizers - volumegroupreplications/finalizers - 
volumereplications/finalizers - - volumereplications/status verbs: - update - apiGroups: @@ -116,20 +131,32 @@ rules: - apiGroups: - replication.storage.openshift.io resources: - - volumereplicationclasses + - volumegroupreplications verbs: - get - list + - patch + - update - watch - apiGroups: - replication.storage.openshift.io resources: - volumereplications verbs: + - create + - delete - get - list - update - watch +- apiGroups: + - replication.storage.openshift.io + resources: + - volumereplications/status + verbs: + - get + - list + - update - apiGroups: - storage.k8s.io resources: diff --git a/deploy/controller/crds.yaml b/deploy/controller/crds.yaml index dec6ee0af..5cac931fc 100644 --- a/deploy/controller/crds.yaml +++ b/deploy/controller/crds.yaml @@ -1414,9 +1414,8 @@ spec: type: string source: description: |- - Source specifies whether the snapshot is (or should be) dynamically provisioned + Source specifies whether the volume is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. - This field is immutable after creation. Required. properties: volumeHandles: @@ -1433,6 +1432,7 @@ spec: description: |- VolumeGroupReplicationClassName is the name of the VolumeGroupReplicationClass from which this group replication was (or will be) created. + Required. type: string volumeGroupReplicationHandle: description: |- @@ -1445,10 +1445,6 @@ spec: VolumeGroupReplicationContent object is bound. VolumeGroupReplication.Spec.VolumeGroupReplicationContentName field must reference to this VolumeGroupReplicationContent's name for the bidirectional binding to be valid. - For a pre-existing VolumeGroupReplicationContent object, name and namespace of the - VolumeGroupReplication object MUST be provided for binding to happen. - This field is immutable after creation. - Required. properties: apiVersion: description: API version of the referent. 
@@ -1491,16 +1487,16 @@ spec: type: object x-kubernetes-map-type: atomic x-kubernetes-validations: - - message: both volumeGroupReplicationRef.name and volumeGroupReplicationRef.namespace - must be set - rule: has(self.name) && has(self.__namespace__) - - message: volumeGroupReplicationRef is immutable - rule: self == oldSelf + - message: volumeGroupReplicationRef.name, volumeGroupReplicationRef.namespace + and volumeGroupReplicationRef.uid must be set if volumeGroupReplicationRef + is defined + rule: 'self != null ? has(self.name) && has(self.__namespace__) + && has(self.uid) : true' required: - provisioner - source + - volumeGroupReplicationClassName - volumeGroupReplicationHandle - - volumeGroupReplicationRef type: object status: description: VolumeGroupReplicationContentStatus defines the status of @@ -1508,7 +1504,7 @@ spec: properties: persistentVolumeRefList: description: |- - PersistentVolumeRefList is the list of of PV for the group replication + PersistentVolumeRefList is the list of PV for the group replication The maximum number of allowed PV in the group is 100. 
items: description: |- @@ -1666,11 +1662,12 @@ spec: - message: volumeGroupReplicationContentName is immutable rule: self == oldSelf volumeReplicationClassName: - description: volumeReplicationClassName is the volumeReplicationClass - name for VolumeReplication object + description: |- + volumeReplicationClassName is the volumeReplicationClass name for the VolumeReplication object + created for this volumeGroupReplication type: string x-kubernetes-validations: - - message: volumReplicationClassName is immutable + - message: volumeReplicationClassName is immutable rule: self == oldSelf volumeReplicationName: description: Name of the VolumeReplication object created for this diff --git a/deploy/controller/rbac.yaml b/deploy/controller/rbac.yaml index 54e31b8ce..8a4498228 100644 --- a/deploy/controller/rbac.yaml +++ b/deploy/controller/rbac.yaml @@ -101,7 +101,6 @@ rules: - "" resources: - namespaces - - persistentvolumes - pods verbs: - get @@ -123,6 +122,15 @@ rules: - persistentvolumeclaims/finalizers verbs: - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch - apiGroups: - coordination.k8s.io resources: @@ -175,11 +183,19 @@ rules: - get - patch - update +- apiGroups: + - replication.storage.openshift.io + resources: + - volumegroupreplicationclasses + - volumereplicationclasses + verbs: + - get + - list + - watch - apiGroups: - replication.storage.openshift.io resources: - volumegroupreplicationcontents - - volumegroupreplications verbs: - create - delete @@ -194,7 +210,6 @@ rules: - volumegroupreplicationcontents/finalizers - volumegroupreplications/finalizers - volumereplications/finalizers - - volumereplications/status verbs: - update - apiGroups: @@ -209,20 +224,32 @@ rules: - apiGroups: - replication.storage.openshift.io resources: - - volumereplicationclasses + - volumegroupreplications verbs: - get - list + - patch + - update - watch - apiGroups: - replication.storage.openshift.io resources: - 
volumereplications
   verbs:
+  - create
+  - delete
   - get
   - list
   - update
   - watch
+- apiGroups:
+  - replication.storage.openshift.io
+  resources:
+  - volumereplications/status
+  verbs:
+  - get
+  - list
+  - update
 - apiGroups:
   - storage.k8s.io
   resources:

From 9c3a4caa6c931977de3ac26cbbb1aefb70db9037 Mon Sep 17 00:00:00 2001
From: Nikhil-Ladha
Date: Thu, 25 Jul 2024 16:01:45 +0530
Subject: [PATCH 3/3] docs: add docs for volumegroupreplication CR

add docs for volumegroupreplication and volumegroupreplicationclass CRs.

Signed-off-by: Nikhil-Ladha
---
 docs/volumegroupreplication.md        | 37 +++++++++++++++++++++++++++
 docs/volumegroupreplicationclass.md   | 24 +++++++++++++++++
 docs/volumegroupreplicationcontent.md | 31 ++++++++++++++++++++++
 docs/volumereplicationclass.md        |  2 +-
 4 files changed, 93 insertions(+), 1 deletion(-)
 create mode 100644 docs/volumegroupreplication.md
 create mode 100644 docs/volumegroupreplicationclass.md
 create mode 100644 docs/volumegroupreplicationcontent.md

diff --git a/docs/volumegroupreplication.md b/docs/volumegroupreplication.md
new file mode 100644
index 000000000..406ca3ca3
--- /dev/null
+++ b/docs/volumegroupreplication.md
@@ -0,0 +1,37 @@
+# VolumeGroupReplication
+
+VolumeGroupReplication is a namespaced resource that contains references to the storage objects to be grouped and replicated, the VolumeGroupReplicationClass corresponding to the driver providing replication, and the VolumeGroupReplicationContent and VolumeReplication CRs.
+
+`volumeGroupReplicationClassName` is the name of the class providing group replication.
+
+`volumeReplicationClassName` is the name of the class providing the replication for the volumeReplication CR.
+
+`volumeReplicationName` is the name of the volumeReplication CR created by this volumeGroupReplication CR.
+
+`volumeGroupReplicationContentName` is the name of the volumeGroupReplicationContent CR created by this volumeGroupReplication CR.
+
+`replicationState` is the state of the volume being referenced.
Possible values are `primary`, `secondary` and `resync`.
+
+- `primary` denotes that the volume is primary
+- `secondary` denotes that the volume is secondary
+- `resync` denotes that the volume needs to be resynced
+
+`source` contains the source of the volumeGroupReplication, i.e., the selectors to match the PVCs/PVs to be replicated.
+
+- `selector` is a label selector to filter the PVCs that are to be included in the group replication
+
+```yaml
+apiVersion: replication.storage.openshift.io/v1alpha1
+kind: VolumeGroupReplication
+metadata:
+  name: volumegroupreplication-sample
+  namespace: default
+spec:
+  volumeReplicationClassName: volumereplicationclass-sample
+  volumeGroupReplicationClassName: volumegroupreplicationclass-sample
+  replicationState: primary
+  source:
+    selector:
+      matchLabels:
+        group: replication
+```
diff --git a/docs/volumegroupreplicationclass.md b/docs/volumegroupreplicationclass.md
new file mode 100644
index 000000000..969fef727
--- /dev/null
+++ b/docs/volumegroupreplicationclass.md
@@ -0,0 +1,24 @@
+# VolumeGroupReplicationClass
+
+VolumeGroupReplicationClass is a cluster scoped resource that contains driver related configuration parameters for volume group replication.
+
+`provisioner` is the name of the storage provisioner.
+
+`parameters` contains key-value pairs that are passed down to the driver. Users can add their own key-value pairs. Keys with the `replication.storage.openshift.io/` prefix are reserved by the operator and not passed down to the driver.
+
+## Reserved parameter keys
+
+- `replication.storage.openshift.io/group-replication-secret-name`
+- `replication.storage.openshift.io/group-replication-secret-namespace`
+
+```yaml
+apiVersion: replication.storage.openshift.io/v1alpha1
+kind: VolumeGroupReplicationClass
+metadata:
+  name: volumegroupreplicationclass-sample
+spec:
+  provisioner: example.provisioner.io
+  parameters:
+    replication.storage.openshift.io/group-replication-secret-name: secret-name
+    replication.storage.openshift.io/group-replication-secret-namespace: secret-namespace
+```
diff --git a/docs/volumegroupreplicationcontent.md b/docs/volumegroupreplicationcontent.md
new file mode 100644
index 000000000..b44131ac5
--- /dev/null
+++ b/docs/volumegroupreplicationcontent.md
@@ -0,0 +1,31 @@
+# VolumeGroupReplicationContent
+
+VolumeGroupReplicationContent is a cluster scoped resource that contains volume grouping related information.
+
+`volumeGroupReplicationRef` contains the object reference of the volumeGroupReplication resource that created this resource.
+
+`volumeGroupReplicationHandle` (optional) is an existing (but new) group replication ID.
+
+`volumeGroupReplicationClassName` is the name of the VolumeGroupReplicationClass that contains the driver related info
+for volume grouping.
+
+`source` (optional) contains the VolumeGroupReplicationContentSource struct.
+
+- `volumeHandles` is the list of volume handles that this resource is responsible for grouping.
+ +```yaml +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeGroupReplicationContent +metadata: + name: volumegroupreplicationcontent-sample +spec: + volumeGroupReplicationRef: + kind: VolumeGroupReplication + name: volumegroupreplication-sample + namespace: default + volumeGroupReplicationClassName: volumegroupreplicationclass-sample + provisioner: example.provisioner.io + source: + volumeHandles: + - myPersistentVolumeHandle +``` diff --git a/docs/volumereplicationclass.md b/docs/volumereplicationclass.md index 10da62803..fc9420ce5 100644 --- a/docs/volumereplicationclass.md +++ b/docs/volumereplicationclass.md @@ -1,6 +1,6 @@ # VolumeReplicationClass -`VolumeReplicationClass` is a cluster scoped resource that contains driver related configuration parameters. +VolumeReplicationClass is a cluster scoped resource that contains driver related configuration parameters. `provisioner` is name of the storage provisioner.