diff --git a/controllers/storageclassrequest/storageclassrequest_controller.go b/controllers/storageclassrequest/storageclassrequest_controller.go
index 0f12ad14f2..3fed45c8eb 100644
--- a/controllers/storageclassrequest/storageclassrequest_controller.go
+++ b/controllers/storageclassrequest/storageclassrequest_controller.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 
 	"github.com/go-logr/logr"
-	"github.com/google/uuid"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	v1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
 	"github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
@@ -60,6 +59,7 @@ type StorageClassRequestReconciler struct {
 	storageCluster               *v1.StorageCluster
 	StorageClassRequest          *v1alpha1.StorageClassRequest
 	cephBlockPool                *rookCephv1.CephBlockPool
+	cephRadosNamespace           *rookCephv1.CephBlockPoolRadosNamespace
 	cephFilesystemSubVolumeGroup *rookCephv1.CephFilesystemSubVolumeGroup
 	cephClientProvisioner        *rookCephv1.CephClient
 	cephClientNode               *rookCephv1.CephClient
@@ -139,6 +139,15 @@ func (r *StorageClassRequestReconciler) Reconcile(ctx context.Context, request r
 
 func (r *StorageClassRequestReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	if err := mgr.GetCache().IndexField(
+		context.TODO(),
+		&rookCephv1.CephBlockPoolRadosNamespace{},
+		util.OwnerUIDIndexName,
+		util.OwnersIndexFieldFunc,
+	); err != nil {
+		return fmt.Errorf("unable to set up FieldIndexer on CephBlockPoolRadosNamespaces for owner reference UIDs: %v", err)
+	}
+
 	if err := mgr.GetCache().IndexField(
 		context.TODO(),
 		&rookCephv1.CephFilesystemSubVolumeGroup{},
@@ -163,11 +172,13 @@ func (r *StorageClassRequestReconciler) SetupWithManager(mgr ctrl.Manager) error
 		return []reconcile.Request{}
 	})
 	enqueueForOwner := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &v1alpha1.StorageClassRequest{})
+	generationChangedPredicate := builder.WithPredicates(predicate.GenerationChangedPredicate{})
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.StorageClassRequest{}, builder.WithPredicates(
 			predicate.GenerationChangedPredicate{},
 		)).
-		Watches(&rookCephv1.CephBlockPool{}, enqueueForOwner).
+		Owns(&rookCephv1.CephBlockPoolRadosNamespace{}, generationChangedPredicate).
 		Watches(&rookCephv1.CephFilesystemSubVolumeGroup{}, enqueueForOwner).
 		Watches(&rookCephv1.CephClient{}, enqueueForOwner).
 		Watches(&storagev1.StorageClass{}, enqueueStorageConsumerRequest).
@@ -213,23 +224,53 @@ func (r *StorageClassRequestReconciler) initPhase(storageProfile *v1.StorageProf
 	// check request status already contains the name of the resource. if not, add it.
 	if r.StorageClassRequest.Spec.Type == "blockpool" {
+		// initialize in-memory structs
+		r.cephRadosNamespace = &rookCephv1.CephBlockPoolRadosNamespace{}
+		r.cephRadosNamespace.Namespace = r.OperatorNamespace
 		r.cephBlockPool = &rookCephv1.CephBlockPool{}
 		r.cephBlockPool.Namespace = r.OperatorNamespace
-		for _, res := range r.StorageClassRequest.Status.CephResources {
-			if res.Kind == "CephBlockPool" {
-				r.cephBlockPool.Name = res.Name
-				break
+
+		// check if a CephBlockPoolRadosNamespace resource exists for the desired storageconsumer and storageprofile.
+		cephRadosNamespaceList := &rookCephv1.CephBlockPoolRadosNamespaceList{}
+		err := r.Client.List(
+			r.ctx,
+			cephRadosNamespaceList,
+			client.InNamespace(r.OperatorNamespace),
+			client.MatchingFields{util.OwnerUIDIndexName: string(r.StorageClassRequest.UID)})
+		if err != nil {
+			return err
+		}
+
+		// if we found no CephBlockPoolRadosNamespaces, generate a new name
+		// if we found only one CephBlockPoolRadosNamespace with our query, we're good
+		// if we found more than one CephBlockPoolRadosNamespace, we can't determine which one to select, so error out
+		rnsItemsLen := len(cephRadosNamespaceList.Items)
+		if rnsItemsLen == 0 {
+			rnsNewName := fmt.Sprintf("cephradosnamespace-%s-%s", r.StorageClassRequest.Spec.StorageProfile, r.storageConsumer.Name)
+			r.log.V(1).Info("no valid CephBlockPoolRadosNamespace found, creating new one", "CephBlockPoolRadosNamespace", rnsNewName)
+			r.cephRadosNamespace.Name = rnsNewName
+		} else if rnsItemsLen == 1 {
+			cephRns := cephRadosNamespaceList.Items[0]
+			if r.cephBlockPool.Name != "" && cephRns.Spec.BlockPoolName != r.cephBlockPool.Name {
+				return fmt.Errorf("found CephBlockPoolRadosNamespace %q with BlockPoolName %q, but CephResources lists CephBlockPool %q", cephRns.Name, cephRns.Spec.BlockPoolName, r.cephBlockPool.Name)
 			}
+			r.cephBlockPool.Name = cephRns.Spec.BlockPoolName
+			r.cephRadosNamespace.Name = cephRns.GetName()
+			r.log.V(1).Info("valid CephBlockPoolRadosNamespace found", "CephBlockPoolRadosNamespace", r.cephRadosNamespace.Name)
+		} else {
+			return fmt.Errorf("invalid number of CephBlockPoolRadosNamespaces for storage consumer %q and storage profile %q: found %d, expecting 0 or 1", r.storageConsumer.Name, storageProfile.Name, rnsItemsLen)
 		}
 
-		// check if a cephblockpool resource exists for the desired storageconsumer and storageprofile.
+		// check if a cephblockpool resource exists for the desired storageprofile.
 		if r.cephBlockPool.Name == "" {
 			cephBlockPoolList := &rookCephv1.CephBlockPoolList{}
-			listOptions := &client.MatchingLabels{
-				controllers.StorageConsumerNameLabel: r.storageConsumer.Name,
-				controllers.StorageProfileSpecLabel:  storageProfile.GetSpecHash(),
+			listOptions := []client.ListOption{
+				client.InNamespace(r.OperatorNamespace),
+				&client.MatchingLabels{
+					controllers.StorageProfileSpecLabel: storageProfile.GetSpecHash(),
+				},
 			}
-			if err := r.list(cephBlockPoolList, client.InNamespace(r.OperatorNamespace), listOptions); err != nil {
+			if err := r.list(cephBlockPoolList, listOptions...); err != nil {
 				return err
 			}
@@ -238,17 +279,19 @@ func (r *StorageClassRequestReconciler) initPhase(storageProfile *v1.StorageProf
 			// if we found more than one CephBlockPool, we can't determine which one to select, so error out
 			cbpItemsLen := len(cephBlockPoolList.Items)
 			if cbpItemsLen == 0 {
-				cbpNewName := fmt.Sprintf("cephblockpool-%s-%s", r.storageConsumer.Name, generateUUID())
+				cbpNewName := fmt.Sprintf("cephblockpool-profile-%s", storageProfile.Name)
 				r.log.V(1).Info("no valid CephBlockPool found, creating new one", "CephBlockPool", cbpNewName)
 				r.cephBlockPool.Name = cbpNewName
 			} else if cbpItemsLen == 1 {
 				r.cephBlockPool.Name = cephBlockPoolList.Items[0].GetName()
 				r.log.V(1).Info("valid CephBlockPool found", "CephBlockPool", r.cephBlockPool.Name)
 			} else {
-				return fmt.Errorf("invalid number of CephBlockPools for storage consumer %q and storage profile %q: found %d, expecting 0 or 1", r.storageConsumer.Name, storageProfile.Name, cbpItemsLen)
+				return fmt.Errorf("invalid number of CephBlockPools for storage profile %q: found %d, expecting 0 or 1", storageProfile.Name, cbpItemsLen)
 			}
 		}
 
+		r.cephRadosNamespace.Spec.BlockPoolName = r.cephBlockPool.Name
+
 	} else if r.StorageClassRequest.Spec.Type == "sharedfilesystem" {
 		r.cephFilesystemSubVolumeGroup = &rookCephv1.CephFilesystemSubVolumeGroup{}
 		r.cephFilesystemSubVolumeGroup.Namespace = r.OperatorNamespace
@@ -321,6 +364,10 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err
 			return reconcile.Result{}, err
 		}
 
+		if err := r.reconcileRadosNamespace(&storageProfile); err != nil {
+			return reconcile.Result{}, err
+		}
+
 	} else if r.StorageClassRequest.Spec.Type == "sharedfilesystem" {
 		if err := r.reconcileCephClientCephFSProvisioner(); err != nil {
 			return reconcile.Result{}, err
@@ -417,6 +464,45 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool(storageProfile *v
 	return nil
 }
 
+func (r *StorageClassRequestReconciler) reconcileRadosNamespace(storageProfile *v1.StorageProfile) error {
+	_, err := ctrl.CreateOrUpdate(r.ctx, r.Client, r.cephRadosNamespace, func() error {
+		if err := r.own(r.cephRadosNamespace); err != nil {
+			return err
+		}
+
+		addLabel(r.cephRadosNamespace, controllers.StorageConsumerNameLabel, r.storageConsumer.Name)
+		addLabel(r.cephRadosNamespace, controllers.StorageProfileSpecLabel, storageProfile.GetSpecHash())
+
+		r.cephRadosNamespace.Spec = rookCephv1.CephBlockPoolRadosNamespaceSpec{
+			BlockPoolName: r.cephBlockPool.Name,
+		}
+		return nil
+	})
+
+	if err != nil {
+		r.log.Error(
+			err,
+			"Failed to update CephBlockPoolRadosNamespace.",
+			"CephBlockPoolRadosNamespace",
+			klog.KRef(r.cephRadosNamespace.Namespace, r.cephRadosNamespace.Name),
+		)
+		return err
+	}
+
+	cephClients := map[string]string{
+		"provisioner": r.cephClientProvisioner.Name,
+		"node":        r.cephClientNode.Name,
+	}
+	phase := ""
+	if r.cephRadosNamespace.Status != nil {
+		phase = string(r.cephRadosNamespace.Status.Phase)
+	}
+
+	r.setCephResourceStatus(r.cephRadosNamespace.Name, "CephBlockPoolRadosNamespace", phase, cephClients)
+
+	return nil
+}
+
 func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup(storageProfile *v1.StorageProfile) error {
 
 	cephFilesystem := rookCephv1.CephFilesystem{
@@ -491,9 +577,9 @@ func (r *StorageClassRequestReconciler) reconcileCephClientRBDProvisioner() erro
 	addStorageRelatedAnnotations(r.cephClientProvisioner, r.getNamespacedName(), "rbd", "provisioner")
 	r.cephClientProvisioner.Spec = rookCephv1.ClientSpec{
 		Caps: map[string]string{
-			"mon": "profile rbd",
+			"mon": "profile rbd, allow command 'osd blocklist'",
 			"mgr": "allow rw",
-			"osd": fmt.Sprintf("profile rbd pool=%s", r.cephBlockPool.Name),
+			"osd": fmt.Sprintf("profile rbd pool=%s namespace=%s", r.cephBlockPool.Name, r.cephRadosNamespace.Name),
 		},
 	}
 	return nil
@@ -529,7 +615,7 @@ func (r *StorageClassRequestReconciler) reconcileCephClientRBDNode() error {
 		Caps: map[string]string{
 			"mon": "profile rbd",
 			"mgr": "allow rw",
-			"osd": fmt.Sprintf("profile rbd pool=%s", r.cephBlockPool.Name),
+			"osd": fmt.Sprintf("profile rbd pool=%s namespace=%s", r.cephBlockPool.Name, r.cephRadosNamespace.Name),
 		},
 	}
 
@@ -566,7 +652,7 @@ func (r *StorageClassRequestReconciler) reconcileCephClientCephFSProvisioner() e
 	addStorageRelatedAnnotations(r.cephClientProvisioner, r.getNamespacedName(), "cephfs", "provisioner")
 	r.cephClientProvisioner.Spec = rookCephv1.ClientSpec{
 		Caps: map[string]string{
-			"mon": "allow r",
+			"mon": "allow r, allow command 'osd blocklist'",
 			"mgr": "allow rw",
 			"mds": fmt.Sprintf("allow rw path=/volumes/%s", r.cephFilesystemSubVolumeGroup.Name),
 			"osd": "allow rw tag cephfs metadata=*",
@@ -690,9 +776,3 @@ func addLabel(obj metav1.Object, key string, value string) {
 	}
 	labels[key] = value
 }
-
-// generateUUID generates a random UUID string and return first 8 characters.
-func generateUUID() string {
-	newUUID := uuid.New().String()
-	return newUUID[:8]
-}
diff --git a/controllers/storageclassrequest/storageclassrequest_controller_test.go b/controllers/storageclassrequest/storageclassrequest_controller_test.go
index 42d1c3f139..7c10e00e1d 100644
--- a/controllers/storageclassrequest/storageclassrequest_controller_test.go
+++ b/controllers/storageclassrequest/storageclassrequest_controller_test.go
@@ -179,6 +179,7 @@ func createFakeReconciler(t *testing.T) StorageClassRequestReconciler {
 
 func newFakeClientBuilder(scheme *runtime.Scheme) *fake.ClientBuilder {
 	return fake.NewClientBuilder().
 		WithScheme(scheme).
+		WithIndex(&rookCephv1.CephBlockPoolRadosNamespace{}, util.OwnerUIDIndexName, util.OwnersIndexFieldFunc).
 		WithIndex(&rookCephv1.CephFilesystemSubVolumeGroup{}, util.OwnerUIDIndexName, util.OwnersIndexFieldFunc)
 }
 
@@ -495,7 +496,7 @@ func TestCephBlockPool(t *testing.T) {
 			label: "No CephBlockPool exists",
 		},
 		{
-			label:            "Valid CephBlockPool exists",
+			label:            "Valid CephBlockPool and RadosNamespace exist",
 			expectedPoolName: "test-blockpool",
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
@@ -503,8 +504,7 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
 				&rookCephv1.CephBlockPool{
@@ -513,25 +513,51 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool2",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: "wrongConsumer",
-							controllers.StorageProfileSpecLabel:  "0123456789",
+							controllers.StorageProfileSpecLabel: "0123456789",
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cephradosnamespace-test-blockpool-test-consumer",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
 		{
-			label: "Valid CephBlockPool only exists for different consumer/profile",
+			label: "Valid CephBlockPool and RadosNamespace only exist for different profile",
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: "wrongConsumer",
-							controllers.StorageProfileSpecLabel:  "0123456789",
+							controllers.StorageProfileSpecLabel: "0123456789",
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cephradosnamespace-test-blockpool-test-consumer",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: "0123456789",
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
@@ -544,8 +570,7 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
@@ -554,21 +579,67 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool2",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
+						},
+					},
+				},
+			},
+		},
+		{
+			label:           "More than one valid RadosNamespace exists",
+			failureExpected: true,
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool",
+						Namespace: "test-ns",
+						Labels: map[string]string{
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
+						},
+					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns2",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
 		{
-			label:            "Request status already has valid CephResource",
+			label:            "Request status has existing CephBlockPool and nonexistent RadosNamespace",
 			expectedPoolName: "test-blockpool",
 			cephResources: []*v1alpha1.CephResourcesSpec{
 				{
 					Name: "test-blockpool",
 					Kind: "CephBlockPool",
 				},
+				{
+					Name: "test-blockpool-rns",
+					Kind: "CephBlockPoolRadosNamespace",
+				},
 			},
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
@@ -576,21 +647,40 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
 			},
 		},
 		{
-			label:            "Request status has CephResource that doesn't exist",
+			label:            "Request status has existing RadosNamespace and nonexistent CephBlockPool",
 			expectedPoolName: "test-blockpool",
 			cephResources: []*v1alpha1.CephResourcesSpec{
 				{
 					Name: "test-blockpool",
 					Kind: "CephBlockPool",
 				},
+				{
+					Name: "test-blockpool-rns",
+					Kind: "CephBlockPoolRadosNamespace",
+				},
+			},
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
+						},
+					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
+				},
 			},
 		},
 	}
@@ -628,6 +718,30 @@ func TestCephBlockPool(t *testing.T) {
 		} else {
 			assert.Equal(t, c.expectedPoolName, r.cephBlockPool.Name, caseLabel)
 		}
+
+		// The generated CephBlockPoolRadosNamespace name is expected
+		// to be deterministic, so the name generation is hard-coded in
+		// the test to guard against unintentional changes.
+		expectedRadosNamespaceName := fmt.Sprintf("cephradosnamespace-%s-%s", r.cephBlockPool.Name, r.storageConsumer.Name)
+		for _, cephRes := range c.cephResources {
+			if cephRes.Kind == "CephBlockPoolRadosNamespace" {
+				expectedRadosNamespaceName = cephRes.Name
+				break
+			}
+		}
+		expectedRadosNamespace := &rookCephv1.CephBlockPoolRadosNamespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      expectedRadosNamespaceName,
+				Namespace: r.cephRadosNamespace.Namespace,
+			},
+		}
+
+		assert.NotEmpty(t, r.cephRadosNamespace, caseLabel)
+		assert.Equal(t, expectedRadosNamespaceName, r.cephRadosNamespace.Name, caseLabel)
+		assert.Equal(t, r.cephBlockPool.Name, r.cephRadosNamespace.Spec.BlockPoolName, caseLabel)
+
+		err = r.get(expectedRadosNamespace)
+		assert.NoError(t, err, caseLabel)
 	}
 
 	caseCounter++
diff --git a/controllers/storageconsumer/storageconsumer_controller.go b/controllers/storageconsumer/storageconsumer_controller.go
index 8a26b2788d..c8ef6703f9 100644
--- a/controllers/storageconsumer/storageconsumer_controller.go
+++ b/controllers/storageconsumer/storageconsumer_controller.go
@@ -44,6 +44,7 @@ const (
 	StorageConsumerAnnotation     = "ocs.openshift.io.storageconsumer"
 	StorageRequestAnnotation      = "ocs.openshift.io.storagerequest"
 	StorageCephUserTypeAnnotation = "ocs.openshift.io.cephusertype"
+	StorageProfileLabel           = "ocs.openshift.io/storageprofile"
 	StorageProfileSpecLabel       = "ocs.openshift.io/storageprofile-spec"
 	ConsumerUUIDLabel             = "ocs.openshift.io/storageconsumer-uuid"
 	StorageConsumerNameLabel      = "ocs.openshift.io/storageconsumer-name"
diff --git a/services/provider/server/server.go b/services/provider/server/server.go
index 8c8098f38d..56896af182 100644
--- a/services/provider/server/server.go
+++ b/services/provider/server/server.go
@@ -577,6 +577,24 @@ func (s *OCSProviderServer) GetStorageClassClaimConfig(ctx context.Context, req
 			})
 
 		case "CephBlockPool":
+			storageClassName = "ceph-rbd"
+			storageClassData["clusterID"] = s.namespace
+
+			// Check if the StorageClassRequest has an associated RADOS Namespace
+			for _, rnsRes := range storageClassRequest.Status.CephResources {
+				if rnsRes.Kind == "CephBlockPoolRadosNamespace" {
+					rns := &rookCephv1.CephBlockPoolRadosNamespace{}
+					err = s.client.Get(ctx, types.NamespacedName{Name: rnsRes.Name, Namespace: s.namespace}, rns)
+					if err != nil {
+						return nil, status.Errorf(codes.Internal, "failed to get %s CephBlockPoolRadosNamespace. %v", rnsRes.Name, err)
%v", rnsRes.Name, err) + } + storageClassData["clusterID"] = rns.Status.Info["clusterID"] + storageClassData["radosnamespace"] = rns.Name + cephRes.CephClients = rnsRes.CephClients + break + } + } + nodeCephClientSecret, _, err := s.getCephClientInformation(ctx, cephRes.CephClients["node"]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -587,8 +605,6 @@ func (s *OCSProviderServer) GetStorageClassClaimConfig(ctx context.Context, req return nil, status.Error(codes.Internal, err.Error()) } - storageClassName = "ceph-rbd" - storageClassData["clusterID"] = s.namespace storageClassData["pool"] = cephRes.Name storageClassData["imageFeatures"] = "layering,deep-flatten,exclusive-lock,object-map,fast-diff" storageClassData["csi.storage.k8s.io/fstype"] = "ext4" @@ -605,13 +621,10 @@ func (s *OCSProviderServer) GetStorageClassClaimConfig(ctx context.Context, req Name: "ceph-rbd", Kind: "VolumeSnapshotClass", Data: mustMarshal(map[string]string{ - "clusterID": s.namespace, + "clusterID": storageClassData["clusterID"], "csi.storage.k8s.io/snapshotter-secret-name": provisionerCephClientSecret, })}) - case "CephBlockPoolRadosNamespace": - storageClassData["radosnamespace"] = cephRes.Name - case "CephFilesystemSubVolumeGroup": subVolumeGroup := &rookCephv1.CephFilesystemSubVolumeGroup{} err := s.client.Get(ctx, types.NamespacedName{Name: cephRes.Name, Namespace: s.namespace}, subVolumeGroup)