diff --git a/controllers/storageclassrequest/storageclassrequest_controller.go b/controllers/storageclassrequest/storageclassrequest_controller.go
index 0f12ad14f2..5396b0b7c4 100644
--- a/controllers/storageclassrequest/storageclassrequest_controller.go
+++ b/controllers/storageclassrequest/storageclassrequest_controller.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 
 	"github.com/go-logr/logr"
-	"github.com/google/uuid"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	v1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
 	"github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
@@ -60,6 +59,7 @@ type StorageClassRequestReconciler struct {
 	storageCluster               *v1.StorageCluster
 	StorageClassRequest          *v1alpha1.StorageClassRequest
 	cephBlockPool                *rookCephv1.CephBlockPool
+	cephRadosNamespace           *rookCephv1.CephBlockPoolRadosNamespace
 	cephFilesystemSubVolumeGroup *rookCephv1.CephFilesystemSubVolumeGroup
 	cephClientProvisioner        *rookCephv1.CephClient
 	cephClientNode               *rookCephv1.CephClient
@@ -139,6 +139,15 @@ func (r *StorageClassRequestReconciler) Reconcile(ctx context.Context, request r
 
 func (r *StorageClassRequestReconciler) SetupWithManager(mgr ctrl.Manager) error {
 
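+	// Index CephBlockPoolRadosNamespaces by the UIDs of their owners so the
+	// reconciler can find the namespace owned by a given StorageClassRequest
+	// with a single indexed list call.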
+	if err := mgr.GetCache().IndexField(
+		context.TODO(),
+		&rookCephv1.CephBlockPoolRadosNamespace{},
+		util.OwnerUIDIndexName,
+		util.OwnersIndexFieldFunc,
+	); err != nil {
+		return fmt.Errorf("unable to set up FieldIndexer on CephBlockPoolRadosNamespaces for owner reference UIDs: %v", err)
+	}
+
 	if err := mgr.GetCache().IndexField(
 		context.TODO(),
 		&rookCephv1.CephFilesystemSubVolumeGroup{},
@@ -168,6 +177,7 @@ func (r *StorageClassRequestReconciler) SetupWithManager(mgr ctrl.Manager) error
 			predicate.GenerationChangedPredicate{},
 		)).
 		Watches(&rookCephv1.CephBlockPool{}, enqueueForOwner).
+		Watches(&rookCephv1.CephBlockPoolRadosNamespace{}, enqueueForOwner).
 		Watches(&rookCephv1.CephFilesystemSubVolumeGroup{}, enqueueForOwner).
 		Watches(&rookCephv1.CephClient{}, enqueueForOwner).
 		Watches(&storagev1.StorageClass{}, enqueueStorageConsumerRequest).
@@ -222,14 +232,25 @@ func (r *StorageClassRequestReconciler) initPhase(storageProfile *v1.StorageProf
 			}
 		}
 
-		// check if a cephblockpool resource exists for the desired storageconsumer and storageprofile.
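+		// Recover the name of a previously provisioned
+		// CephBlockPoolRadosNamespace, if one is recorded in the request's
+		// status.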
+		r.cephRadosNamespace = &rookCephv1.CephBlockPoolRadosNamespace{}
+		r.cephRadosNamespace.Namespace = r.OperatorNamespace
+		for _, res := range r.StorageClassRequest.Status.CephResources {
+			if res.Kind == "CephBlockPoolRadosNamespace" {
+				r.cephRadosNamespace.Name = res.Name
+				break
+			}
+		}
+
+		// check if a cephblockpool resource exists for the desired storageprofile.
 		if r.cephBlockPool.Name == "" {
 			cephBlockPoolList := &rookCephv1.CephBlockPoolList{}
-			listOptions := &client.MatchingLabels{
-				controllers.StorageConsumerNameLabel: r.storageConsumer.Name,
-				controllers.StorageProfileSpecLabel:  storageProfile.GetSpecHash(),
+			listOptions := []client.ListOption{
+				client.InNamespace(r.OperatorNamespace),
+				&client.MatchingLabels{
+					controllers.StorageProfileSpecLabel: storageProfile.GetSpecHash(),
+				},
 			}
-			if err := r.list(cephBlockPoolList, client.InNamespace(r.OperatorNamespace), listOptions); err != nil {
+			if err := r.list(cephBlockPoolList, listOptions...); err != nil {
 				return err
 			}
 
@@ -238,17 +259,51 @@ func (r *StorageClassRequestReconciler) initPhase(storageProfile *v1.StorageProf
 			// if we found more than one CephBlockPool, we can't determine which one to select, so error out
 			cbpItemsLen := len(cephBlockPoolList.Items)
 			if cbpItemsLen == 0 {
-				cbpNewName := fmt.Sprintf("cephblockpool-%s-%s", r.storageConsumer.Name, generateUUID())
+				cbpNewName := fmt.Sprintf("cephblockpool-profile-%s", storageProfile.Name)
 				r.log.V(1).Info("no valid CephBlockPool found, creating new one", "CephBlockPool", cbpNewName)
 				r.cephBlockPool.Name = cbpNewName
 			} else if cbpItemsLen == 1 {
 				r.cephBlockPool.Name = cephBlockPoolList.Items[0].GetName()
 				r.log.V(1).Info("valid CephBlockPool found", "CephBlockPool", r.cephBlockPool.Name)
 			} else {
-				return fmt.Errorf("invalid number of CephBlockPools for storage consumer %q and storage profile %q: found %d, expecting 0 or 1", r.storageConsumer.Name, storageProfile.Name, cbpItemsLen)
+				return fmt.Errorf("invalid number of CephBlockPools for storage profile %q: found %d, expecting 0 or 1", storageProfile.Name, cbpItemsLen)
 			}
 		}
 
+		// check if a CephBlockPoolRadosNamespace resource exists that is owned by this StorageClassRequest.
+		if r.cephRadosNamespace.Name == "" {
+			cephRadosNamespaceList := &rookCephv1.CephBlockPoolRadosNamespaceList{}
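+			// List only the CephBlockPoolRadosNamespaces owned by this
+			// StorageClassRequest, via the owner-UID field index registered
+			// in SetupWithManager.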
+			err := r.Client.List(
+				r.ctx,
+				cephRadosNamespaceList,
+				client.InNamespace(r.OperatorNamespace),
+				client.MatchingFields{util.OwnerUIDIndexName: string(r.StorageClassRequest.UID)})
+			if err != nil {
+				return err
+			}
+
+			// if we found no CephBlockPoolRadosNamespaces, generate a new name
+			// if we found only one CephBlockPoolRadosNamespace with our query, we're good
+			// if we found more than one CephBlockPoolRadosNamespace, we can't determine which one to select, so error out
+			rnsItemsLen := len(cephRadosNamespaceList.Items)
+			if rnsItemsLen == 0 {
+				rnsNewName := fmt.Sprintf("cephradosnamespace-%s-%s", r.cephBlockPool.Name, r.storageConsumer.Name)
+				r.log.V(1).Info("no valid CephBlockPoolRadosNamespace found, creating new one", "CephBlockPoolRadosNamespace", rnsNewName)
+				r.cephRadosNamespace.Name = rnsNewName
+			} else if rnsItemsLen == 1 {
+				cephRns := cephRadosNamespaceList.Items[0]
+				if r.cephBlockPool.Name != "" && cephRns.Spec.BlockPoolName != r.cephBlockPool.Name {
+					return fmt.Errorf("found CephBlockPoolRadosNamespace %q with BlockPoolName %q, but the selected CephBlockPool is %q", cephRns.Name, cephRns.Spec.BlockPoolName, r.cephBlockPool.Name)
+				}
+				r.cephRadosNamespace.Name = cephRns.GetName()
+				r.log.V(1).Info("valid CephBlockPoolRadosNamespace found", "CephBlockPoolRadosNamespace", r.cephRadosNamespace.Name)
+			} else {
+				return fmt.Errorf("invalid number of CephBlockPoolRadosNamespaces owned by StorageClassRequest %q: found %d, expecting 0 or 1", r.StorageClassRequest.Name, rnsItemsLen)
+			}
+		}
+
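+		// Whether the namespace name came from status, from the indexed list,
+		// or was newly generated, bind it to the block pool selected above.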
+		r.cephRadosNamespace.Spec.BlockPoolName = r.cephBlockPool.Name
+
 	} else if r.StorageClassRequest.Spec.Type == "sharedfilesystem" {
 		r.cephFilesystemSubVolumeGroup = &rookCephv1.CephFilesystemSubVolumeGroup{}
 		r.cephFilesystemSubVolumeGroup.Namespace = r.OperatorNamespace
@@ -321,6 +376,10 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err
 				return reconcile.Result{}, err
 			}
 
+			if err := r.reconcileRadosNamespace(&storageProfile); err != nil {
+				return reconcile.Result{}, err
+			}
+
 		} else if r.StorageClassRequest.Spec.Type == "sharedfilesystem" {
 			if err := r.reconcileCephClientCephFSProvisioner(); err != nil {
 				return reconcile.Result{}, err
@@ -417,6 +476,41 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool(storageProfile *v
 	return nil
 }
 
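+// reconcileRadosNamespace creates or updates the CephBlockPoolRadosNamespace
+// for this request, claiming ownership and binding it to the selected
+// CephBlockPool.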
+func (r *StorageClassRequestReconciler) reconcileRadosNamespace(storageProfile *v1.StorageProfile) error {
+	_, err := ctrl.CreateOrUpdate(r.ctx, r.Client, r.cephRadosNamespace, func() error {
+		if err := r.own(r.cephRadosNamespace); err != nil {
+			return err
+		}
+
+		addLabel(r.cephRadosNamespace, controllers.StorageConsumerNameLabel, r.storageConsumer.Name)
+		addLabel(r.cephRadosNamespace, controllers.StorageProfileSpecLabel, storageProfile.GetSpecHash())
+
+		r.cephRadosNamespace.Spec = rookCephv1.CephBlockPoolRadosNamespaceSpec{
+			BlockPoolName: r.cephBlockPool.Name,
+		}
+		return nil
+	})
+
+	if err != nil {
+		r.log.Error(
+			err,
+			"Failed to update CephBlockPoolRadosNamespace.",
+			"CephBlockPoolRadosNamespace",
+			klog.KRef(r.cephRadosNamespace.Namespace, r.cephRadosNamespace.Name),
+		)
+		return err
+	}
+
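+	// Status is a pointer and remains nil until Rook first reports on the
+	// resource, so guard it before reading the phase.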
+	phase := ""
+	if r.cephRadosNamespace.Status != nil {
+		phase = string(r.cephRadosNamespace.Status.Phase)
+	}
+
+	r.setCephResourceStatus(r.cephRadosNamespace.Name, "CephBlockPoolRadosNamespace", phase, nil)
+
+	return nil
+}
+
 func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup(storageProfile *v1.StorageProfile) error {
 
 	cephFilesystem := rookCephv1.CephFilesystem{
@@ -690,9 +784,3 @@ func addLabel(obj metav1.Object, key string, value string) {
 	}
 	labels[key] = value
 }
-
-// generateUUID generates a random UUID string and return first 8 characters.
-func generateUUID() string {
-	newUUID := uuid.New().String()
-	return newUUID[:8]
-}
diff --git a/controllers/storageclassrequest/storageclassrequest_controller_test.go b/controllers/storageclassrequest/storageclassrequest_controller_test.go
index 42d1c3f139..7c10e00e1d 100644
--- a/controllers/storageclassrequest/storageclassrequest_controller_test.go
+++ b/controllers/storageclassrequest/storageclassrequest_controller_test.go
@@ -179,6 +179,7 @@ func createFakeReconciler(t *testing.T) StorageClassRequestReconciler {
 func newFakeClientBuilder(scheme *runtime.Scheme) *fake.ClientBuilder {
 	return fake.NewClientBuilder().
 		WithScheme(scheme).
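+		// Mirror the manager's owner-UID field indexes so the fake client can
+		// serve the reconciler's MatchingFields queries.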
+		WithIndex(&rookCephv1.CephBlockPoolRadosNamespace{}, util.OwnerUIDIndexName, util.OwnersIndexFieldFunc).
 		WithIndex(&rookCephv1.CephFilesystemSubVolumeGroup{}, util.OwnerUIDIndexName, util.OwnersIndexFieldFunc)
 }
 
@@ -495,7 +496,7 @@ func TestCephBlockPool(t *testing.T) {
 			label: "No CephBlockPool exists",
 		},
 		{
-			label:            "Valid CephBlockPool exists",
+			label:            "Valid CephBlockPool and RadosNamespace exist",
 			expectedPoolName: "test-blockpool",
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
@@ -503,8 +504,7 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
@@ -513,25 +513,51 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool2",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: "wrongConsumer",
-							controllers.StorageProfileSpecLabel:  "0123456789",
+							controllers.StorageProfileSpecLabel: "0123456789",
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cephradosnamespace-test-blockpool-test-consumer",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
 		{
-			label: "Valid CephBlockPool only exists for different consumer/profile",
+			label: "Valid CephBlockPool and RadosNamespace only exist for different profile",
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: "wrongConsumer",
-							controllers.StorageProfileSpecLabel:  "0123456789",
+							controllers.StorageProfileSpecLabel: "0123456789",
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cephradosnamespace-test-blockpool-test-consumer",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: "0123456789",
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
@@ -544,8 +570,7 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
@@ -554,21 +579,67 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool2",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
+						},
+					},
+				},
+			},
+		},
+		{
+			label:           "More than one valid RadosNamespace exists",
+			failureExpected: true,
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool",
+						Namespace: "test-ns",
+						Labels: map[string]string{
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
+						},
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
+						},
+					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
+				},
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns2",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
 						},
 					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
 				},
 			},
 		},
 		{
-			label:            "Request status already has valid CephResource",
+			label:            "Request status has existing CephBlockPool and nonexistent RadosNamespace",
 			expectedPoolName: "test-blockpool",
 			cephResources: []*v1alpha1.CephResourcesSpec{
 				{
 					Name: "test-blockpool",
 					Kind: "CephBlockPool",
 				},
+				{
+					Name: "test-blockpool-rns",
+					Kind: "CephBlockPoolRadosNamespace",
+				},
 			},
 			createObjects: []runtime.Object{
 				&rookCephv1.CephBlockPool{
@@ -576,21 +647,40 @@ func TestCephBlockPool(t *testing.T) {
 						Name:      "test-blockpool",
 						Namespace: "test-ns",
 						Labels: map[string]string{
-							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
-							controllers.StorageProfileSpecLabel:  fakeStorageProfile.GetSpecHash(),
+							controllers.StorageProfileSpecLabel: fakeStorageProfile.GetSpecHash(),
 						},
 					},
 				},
 			},
 		},
 		{
-			label:            "Request status has CephResource that doesn't exist",
+			label:            "Request status has existing RadosNamespace and nonexistent CephBlockPool",
 			expectedPoolName: "test-blockpool",
 			cephResources: []*v1alpha1.CephResourcesSpec{
 				{
 					Name: "test-blockpool",
 					Kind: "CephBlockPool",
 				},
+				{
+					Name: "test-blockpool-rns",
+					Kind: "CephBlockPoolRadosNamespace",
+				},
+			},
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPoolRadosNamespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-blockpool-rns",
+						Namespace: "test-ns",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								UID: storageClassRequestUID,
+							},
+						},
+					},
+					Spec: rookCephv1.CephBlockPoolRadosNamespaceSpec{
+						BlockPoolName: "test-blockpool",
+					},
+				},
 			},
 		},
 	}
@@ -628,6 +718,30 @@ func TestCephBlockPool(t *testing.T) {
 		} else {
 			assert.Equal(t, c.expectedPoolName, r.cephBlockPool.Name, caseLabel)
 		}
+
+		// The generated CephBlockPoolRadosNamespace name is expected to be
+		// deterministic, so the name generation is hard-coded in the test to
+		// guard against unintentional changes.
+		expectedRadosNamespaceName := fmt.Sprintf("cephradosnamespace-%s-%s", r.cephBlockPool.Name, r.storageConsumer.Name)
+		for _, cephRes := range c.cephResources {
+			if cephRes.Kind == "CephBlockPoolRadosNamespace" {
+				expectedRadosNamespaceName = cephRes.Name
+				break
+			}
+		}
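+		// Build the expected object with the resolved name so it can be
+		// fetched back from the fake client below.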
+		expectedRadosNamespace := &rookCephv1.CephBlockPoolRadosNamespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      expectedRadosNamespaceName,
+				Namespace: r.cephRadosNamespace.Namespace,
+			},
+		}
+
+		assert.NotEmpty(t, r.cephRadosNamespace, caseLabel)
+		assert.Equal(t, expectedRadosNamespaceName, r.cephRadosNamespace.Name, caseLabel)
+		assert.Equal(t, r.cephBlockPool.Name, r.cephRadosNamespace.Spec.BlockPoolName, caseLabel)
+
+		err = r.get(expectedRadosNamespace)
+		assert.NoError(t, err, caseLabel)
 	}
 
 	caseCounter++
diff --git a/controllers/storageconsumer/storageconsumer_controller.go b/controllers/storageconsumer/storageconsumer_controller.go
index 8a26b2788d..c8ef6703f9 100644
--- a/controllers/storageconsumer/storageconsumer_controller.go
+++ b/controllers/storageconsumer/storageconsumer_controller.go
@@ -44,6 +44,7 @@ const (
 	StorageConsumerAnnotation     = "ocs.openshift.io.storageconsumer"
 	StorageRequestAnnotation      = "ocs.openshift.io.storagerequest"
 	StorageCephUserTypeAnnotation = "ocs.openshift.io.cephusertype"
+	StorageProfileLabel           = "ocs.openshift.io/storageprofile"
 	StorageProfileSpecLabel       = "ocs.openshift.io/storageprofile-spec"
 	ConsumerUUIDLabel             = "ocs.openshift.io/storageconsumer-uuid"
 	StorageConsumerNameLabel      = "ocs.openshift.io/storageconsumer-name"