Skip to content

Commit

Permalink
controllers: ensure default storageclient exists in provider mode
Browse files Browse the repository at this point in the history
Signed-off-by: Leela Venkaiah G <[email protected]>
  • Loading branch information
leelavg committed Mar 15, 2024
1 parent 6aa944d commit b4b5935
Show file tree
Hide file tree
Showing 11 changed files with 176 additions and 76 deletions.
7 changes: 4 additions & 3 deletions controllers/storagecluster/provider_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,6 @@ func (o *ocsProviderServer) ensureCreated(r *StorageClusterReconciler, instance

if !instance.Spec.AllowRemoteStorageConsumers {
r.Log.Info("Spec.AllowRemoteStorageConsumers is disabled")
if err := r.verifyNoStorageConsumerExist(instance); err != nil {
return reconcile.Result{}, err
}
return o.ensureDeleted(r, instance)
}

Expand Down Expand Up @@ -88,6 +85,10 @@ func (o *ocsProviderServer) ensureDeleted(r *StorageClusterReconciler, instance

// NOTE: Do not add the check

if err := r.verifyNoStorageConsumerExist(instance, false /* skip local */); err != nil {
return reconcile.Result{}, err
}

var finalErr error

for _, resource := range []client.Object{
Expand Down
1 change: 1 addition & 0 deletions controllers/storagecluster/reconcile.go
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
// preserve list order
objs = []resourceManager{
&ocsProviderServer{},
&storageClient{},
&backingStorageClasses{},
&ocsTopologyMap{},
&ocsStorageQuota{},
Expand Down
63 changes: 63 additions & 0 deletions controllers/storagecluster/storageclient.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
package storagecluster

import (
"fmt"

ocsclientv1a1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
"github.com/red-hat-storage/ocs-operator/v4/services"
kerrors "k8s.io/apimachinery/pkg/api/errors"
cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
	// storageClientName is the fixed name of the default StorageClient CR
	// the provider cluster creates for itself in the operator namespace.
	storageClientName = "local-storageclient"

	// tokenLifetimeInHours is the validity window of a generated
	// onboarding token.
	tokenLifetimeInHours = 48
	// onboardingPrivateKeyFilePath is the mount path of the private key
	// used to sign onboarding tokens.
	onboardingPrivateKeyFilePath = "/etc/private-key/key"
)

// storageClient reconciles the default local StorageClient CR so that a
// provider-mode cluster also consumes its own storage.
type storageClient struct{}

// Compile-time check that storageClient satisfies resourceManager.
var _ resourceManager = &storageClient{}

// ensureCreated reconciles the default StorageClient CR ("local-storageclient")
// in the operator namespace so the provider cluster consumes its own storage.
// When Spec.AllowRemoteStorageConsumers is disabled the CR is deleted instead.
// Returns an error so the reconcile is requeued on failure.
func (s *storageClient) ensureCreated(r *StorageClusterReconciler, storagecluster *ocsv1.StorageCluster) (reconcile.Result, error) {

	if !storagecluster.Spec.AllowRemoteStorageConsumers {
		r.Log.Info("Spec.AllowRemoteStorageConsumers is disabled")
		return s.ensureDeleted(r, storagecluster)
	}

	storageClient := &ocsclientv1a1.StorageClient{}
	storageClient.Name = storageClientName
	storageClient.Namespace = r.OperatorNamespace
	_, err := cutil.CreateOrUpdate(r.ctx, r.Client, storageClient, func() error {
		// Issue a fresh onboarding ticket only while the client has not yet
		// onboarded; a non-empty ConsumerID means onboarding already happened.
		if storageClient.Status.ConsumerID == "" {
			token, err := services.GenerateOnboardingToken(tokenLifetimeInHours, onboardingPrivateKeyFilePath)
			if err != nil {
				return fmt.Errorf("unable to generate onboarding token: %v", err)
			}
			storageClient.Spec.OnboardingTicket = token
		}
		storageClient.Spec.StorageProviderEndpoint = storagecluster.Status.StorageProviderEndpoint
		return nil
	})
	if err != nil {
		r.Log.Error(err, "Failed to create local StorageClient CR")
		// BUG FIX: previously returned a nil error here, which swallowed the
		// failure and prevented the reconcile from being retried.
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}

// ensureDeleted removes the default local StorageClient CR, treating an
// already-absent CR as success.
func (s *storageClient) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
	localClient := &ocsclientv1a1.StorageClient{}
	localClient.Name = storageClientName
	localClient.Namespace = r.OperatorNamespace

	err := r.Delete(r.ctx, localClient)
	if err == nil || kerrors.IsNotFound(err) {
		return reconcile.Result{}, nil
	}
	r.Log.Error(err, "Failed to initiate deletion of local StorageClient CR")
	return reconcile.Result{}, err
}
4 changes: 3 additions & 1 deletion controllers/storagecluster/storagecluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
"github.com/operator-framework/operator-lib/conditions"
ocsclientv1a1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
Expand Down Expand Up @@ -181,7 +182,8 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
},
enqueueStorageClusterRequest,
).
Watches(&ocsv1alpha1.StorageConsumer{}, enqueueStorageClusterRequest, builder.WithPredicates(ocsClientOperatorVersionPredicate))
Watches(&ocsv1alpha1.StorageConsumer{}, enqueueStorageClusterRequest, builder.WithPredicates(ocsClientOperatorVersionPredicate)).
Watches(&ocsclientv1a1.StorageClient{}, enqueueStorageClusterRequest)
if os.Getenv("SKIP_NOOBAA_CRD_WATCH") != "true" {
builder.Owns(&nbv1.NooBaa{})
}
Expand Down
6 changes: 6 additions & 0 deletions controllers/storagecluster/storagecluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
openshiftv1 "github.com/openshift/api/template/v1"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
ocsclientv1a1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
api "github.com/red-hat-storage/ocs-operator/api/v4/v1"
ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/defaults"
Expand Down Expand Up @@ -1213,6 +1214,11 @@ func createFakeScheme(t *testing.T) *runtime.Scheme {
assert.Fail(t, "failed to add ocsv1alpha1 scheme")
}

err = ocsclientv1a1.AddToScheme(scheme)
if err != nil {
assert.Fail(t, "failed to add ocsclientv1a1 scheme")
}

return scheme
}

Expand Down
24 changes: 20 additions & 4 deletions controllers/storagecluster/uninstall_reconciler.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/defaults"
"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -270,15 +272,28 @@ func (r *StorageClusterReconciler) reconcileUninstallAnnotations(sc *ocsv1.Stora
return false, nil
}

// verifyNoStorageConsumerExist verifies there are no storageConsumers on the same namespace
func (r *StorageClusterReconciler) verifyNoStorageConsumerExist(instance *ocsv1.StorageCluster) error {
// verifyNoStorageConsumerExist verifies there are no storageConsumers on the same namespace conditionally
func (r *StorageClusterReconciler) verifyNoStorageConsumerExist(instance *ocsv1.StorageCluster, skipLocalConsumer bool) error {

storageConsumers := &ocsv1alpha1.StorageConsumerList{}
err := r.Client.List(context.TODO(), storageConsumers, &client.ListOptions{Namespace: instance.Namespace})
if err != nil {
return err
}

if len(storageConsumers.Items) == 1 && skipLocalConsumer {
clusterID := util.GetClusterID(r.ctx, r.Client, &r.Log)
if clusterID == "" {
return fmt.Errorf("failed to get cluster id")
}

consumer := &storageConsumers.Items[0]
if strings.HasSuffix(consumer.Name, clusterID) {
r.Log.Info("Only local storage consumer exist")
return nil
}
}

if len(storageConsumers.Items) != 0 {
err = fmt.Errorf("Failed to cleanup provider resources. StorageConsumers are present in the %s namespace. "+
"Offboard all consumer clusters for the provider cleanup to proceed", instance.Namespace)
Expand All @@ -296,7 +311,7 @@ func (r *StorageClusterReconciler) deleteResources(sc *ocsv1.StorageCluster) (re
// we do not check if it is a provider before checking storageConsumers CR because of corner case
// where user can mark instance.Spec.AllowRemoteStorageConsumers as false and mark CR for deletion immediately.
// Which will trigger a deletion without having instance.Spec.AllowRemoteStorageConsumers as true.
err := r.verifyNoStorageConsumerExist(sc)
err := r.verifyNoStorageConsumerExist(sc, true /* skip local */)
if err != nil {
return reconcile.Result{}, err
}
Expand Down Expand Up @@ -325,9 +340,10 @@ func (r *StorageClusterReconciler) deleteResources(sc *ocsv1.StorageCluster) (re
}

objs := []resourceManager{
&storageClient{},
&ocsExternalResources{},
&ocsProviderServer{},
&ocsNoobaaSystem{},
&ocsProviderServer{},
&ocsCephRGWRoutes{},
&ocsCephObjectStoreUsers{},
&ocsCephObjectStores{},
Expand Down
4 changes: 2 additions & 2 deletions controllers/storagecluster/uninstall_reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -843,7 +843,7 @@ func TestVerifyNoStorageConsumerExist(t *testing.T) {
err := r.Client.Create(context.TODO(), storageConsumer)
assert.NoError(t, err)

err = r.verifyNoStorageConsumerExist(instance)
err = r.verifyNoStorageConsumerExist(instance, false /* skip local */)
assert.Error(t, err)
expectedErr := fmt.Errorf("Failed to cleanup provider resources. StorageConsumers are present in the namespace. " +
"Offboard all consumer clusters for the provider cleanup to proceed")
Expand All @@ -854,7 +854,7 @@ func TestVerifyNoStorageConsumerExist(t *testing.T) {

r, instance := createSetup()

err := r.verifyNoStorageConsumerExist(instance)
err := r.verifyNoStorageConsumerExist(instance, false /* skip local */)
assert.NoError(t, err)
})
}
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ require (
github.com/operator-framework/operator-lifecycle-manager v0.26.0
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.70.0
github.com/prometheus-operator/prometheus-operator/pkg/client v0.70.0
github.com/red-hat-storage/ocs-client-operator v0.0.0-20240216124345-1b9b7fb23b8d
github.com/red-hat-storage/ocs-operator/api/v4 v4.0.0-00010101000000-000000000000
github.com/rook/rook/pkg/apis v0.0.0-20231215165123-32de0fb5f69b
github.com/stretchr/testify v1.8.4
Expand Down
2 changes: 2 additions & 0 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import (
apiv2 "github.com/operator-framework/api/pkg/operators/v2"
"github.com/operator-framework/operator-lib/conditions"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
ocsclientv1a1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/namespace"
Expand Down Expand Up @@ -92,6 +93,7 @@ func init() {
utilruntime.Must(clusterv1alpha1.AddToScheme(scheme))
utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme))
utilruntime.Must(nadscheme.AddToScheme(scheme))
utilruntime.Must(ocsclientv1a1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}

Expand Down
73 changes: 73 additions & 0 deletions services/tokengenerator.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
package services

import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"os"
"time"

"github.com/google/uuid"
)

// GenerateOnboardingToken generates a token valid for a duration of
// "tokenLifetimeInHours". The token content is predefined and signed with the
// RSA private key read from the supplied "privateKeyPath" (PKCS#1 PEM).
// The returned token is "base64(payload).base64(signature)".
func GenerateOnboardingToken(tokenLifetimeInHours int, privateKeyPath string) (string, error) {
	tokenExpirationDate := time.Now().
		Add(time.Duration(tokenLifetimeInHours) * time.Hour).
		Unix()

	payload, err := json.Marshal(OnboardingTicket{
		ID:             uuid.New().String(),
		ExpirationDate: tokenExpirationDate,
	})
	if err != nil {
		return "", fmt.Errorf("failed to marshal the payload: %v", err)
	}

	privateKey, err := readAndDecodePrivateKey(privateKeyPath)
	if err != nil {
		return "", fmt.Errorf("failed to read and decode private key: %v", err)
	}

	// RSA PKCS#1 v1.5 signs a digest, not the raw message, so hash the
	// payload first; the signature is computed over the SHA-256 sum.
	// (sha256.Sum256 replaces the previous hash.Hash writer whose error
	// branch was unreachable — sha256's Write never fails.)
	msgHashSum := sha256.Sum256(payload)
	signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, msgHashSum[:])
	if err != nil {
		// BUG FIX: the message previously said "failed to sign private key",
		// but it is the payload that is signed (using the private key).
		return "", fmt.Errorf("failed to sign onboarding token payload: %v", err)
	}

	encodedPayload := base64.StdEncoding.EncodeToString(payload)
	encodedSignature := base64.StdEncoding.EncodeToString(signature)
	return fmt.Sprintf("%s.%s", encodedPayload, encodedSignature), nil
}

func readAndDecodePrivateKey(privateKeyPath string) (*rsa.PrivateKey, error) {
pemString, err := os.ReadFile(privateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read private key: %v", err)
}

Block, _ := pem.Decode(pemString)
privateKey, err := x509.ParsePKCS1PrivateKey(Block.Bytes)
if err != nil {
return nil, fmt.Errorf("failed to parse private key: %v", err)
}
return privateKey, nil
}
Loading

0 comments on commit b4b5935

Please sign in to comment.