From df4b31b152c9a4eec90422dc2c790e3b66b96876 Mon Sep 17 00:00:00 2001
From: rknaur
Date: Thu, 28 Nov 2024 13:55:28 +0100
Subject: [PATCH] Run ROSAMachinePool controller tests against envtest

---
 exp/controllers/rosamachinepool_controller.go |   6 +-
 .../rosamachinepool_controller_test.go        | 320 ++++++++++++++----
 exp/controllers/suite_test.go                 |  13 +
 pkg/cloud/scope/rosamachinepool.go            |  12 +-
 4 files changed, 290 insertions(+), 61 deletions(-)

diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go
index 7fad8a9aec..08bb4f843b 100644
--- a/exp/controllers/rosamachinepool_controller.go
+++ b/exp/controllers/rosamachinepool_controller.go
@@ -164,6 +164,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	if !controlPlane.Status.Ready && controlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
 		log.Info("Control plane is not ready yet")
 		err := machinePoolScope.RosaMchinePoolReadyFalse(expinfrav1.WaitingForRosaControlPlaneReason, "")
+
 		return ctrl.Result{}, err
 	}
 
@@ -201,11 +202,12 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 	}
 
 	failureMessage, err := validateMachinePoolSpec(machinePoolScope)
+
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to validate ROSAMachinePool.spec: %w", err)
 	}
+
 	if failureMessage != nil {
-		machinePoolScope.RosaMachinePool.Status.FailureMessage = failureMessage
 		// don't requeue because input is invalid and manual intervention is needed.
 		return ctrl.Result{}, nil
 	}
@@ -225,6 +227,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 	}
 
 	nodePool, found, err := ocmClient.GetNodePool(machinePoolScope.ControlPlane.Status.ID, rosaMachinePool.Spec.NodePoolName)
+
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -297,6 +300,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 	}
 
 	machinePoolScope.RosaMachinePool.Status.ID = nodePool.ID()
+
 	return ctrl.Result{}, nil
 }
 
diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go
index 5a978b5801..47fec01537 100644
--- a/exp/controllers/rosamachinepool_controller_test.go
+++ b/exp/controllers/rosamachinepool_controller_test.go
@@ -18,19 +18,19 @@ import (
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/patch"
 )
 
 func TestNodePoolToRosaMachinePoolSpec(t *testing.T) {
@@ -81,13 +81,13 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) {
 }
 
 func TestRosaMachinePoolReconcile(t *testing.T) {
-	g := NewWithT(t)
 	var (
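+		// Fixtures shared by all subtests; setup() recreates them in envtest for each case.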
 		recorder         *record.FakeRecorder
 		mockCtrl         *gomock.Controller
 		ctx              context.Context
 		scheme           *runtime.Scheme
 		ns               *corev1.Namespace
+		identity         *infrav1.AWSClusterControllerIdentity
 		secret           *corev1.Secret
 		rosaControlPlane *rosacontrolplanev1.ROSAControlPlane
 		ownerCluster     *clusterv1.Cluster
@@ -98,12 +98,13 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 		err              error
 	)
 
-	setup := func(t *testing.T) {
+	setup := func(t *testing.T, g *WithT) {
 		t.Helper()
 		mockCtrl = gomock.NewController(t)
 		recorder = record.NewFakeRecorder(10)
 		ctx = context.TODO()
 		scheme = runtime.NewScheme()
 		ns, err = testEnv.CreateNamespace(ctx, "test-namespace")
 		g.Expect(err).To(BeNil())
@@ -123,6 +124,17 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 				"ocmToken": []byte("secret-ocm-token-string"),
 			},
 		}
+		identity = &infrav1.AWSClusterControllerIdentity{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "default",
+			},
+			Spec: infrav1.AWSClusterControllerIdentitySpec{
+				AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{
+					AllowedNamespaces: &infrav1.AllowedNamespaces{},
+				},
+			},
+		}
+		identity.SetGroupVersionKind(infrav1.GroupVersion.WithKind("AWSClusterControllerIdentity"))
 
 		rosaControlPlane = &rosacontrolplanev1.ROSAControlPlane{
 			ObjectMeta: metav1.ObjectMeta{Name: "rosa-control-plane", Namespace: ns.Name},
@@ -149,9 +161,14 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 				CredentialsSecretRef: &corev1.LocalObjectReference{
 					Name: secret.Name,
 				},
+				VersionGate: "Acknowledge",
+				IdentityRef: &infrav1.AWSIdentityReference{
+					Name: identity.Name,
+					Kind: infrav1.ControllerIdentityKind,
+				},
 			},
 			Status: rosacontrolplanev1.RosaControlPlaneStatus{
-				Ready: true,
+				Ready: false,
 				ID:    "rosa-control-plane1",
 			},
 		}
@@ -174,12 +191,20 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "rosa-machinepool",
 				Namespace: ns.Name,
+				UID:       "rosa-machinepool-1",
 			},
 			TypeMeta: metav1.TypeMeta{
 				Kind:       "ROSAMachinePool",
 				APIVersion: expinfrav1.GroupVersion.String(),
 			},
-			Spec: expinfrav1.RosaMachinePoolSpec{},
+			Spec: expinfrav1.RosaMachinePoolSpec{
+				NodePoolName: "test-nodepool",
+				Version:      "4.14.5",
+				Subnet:       "subnet-id",
+				InstanceType: "m5.large",
+			},
 		}
 
 		ownerMachinePool = &expclusterv1.MachinePool{
@@ -220,14 +245,16 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 			},
 		}
 
-		objects = []client.Object{secret, ownerCluster, ownerMachinePool}
+		objects = []client.Object{secret, ownerCluster, ownerMachinePool, rosaMachinePool, rosaControlPlane, identity}
 
 		for _, obj := range objects {
 			createObject(g, obj, ns.Name)
 		}
 	}
 
-	teardown := func() {
+	teardown := func(t *testing.T, g *WithT) {
+		t.Helper()
+		err = nil
 		mockCtrl.Finish()
 		for _, obj := range objects {
 			cleanupObject(g, obj)
@@ -235,8 +262,9 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
 	}
 
 	t.Run("Reconcile create node pool", func(t *testing.T) {
-		setup(t)
-		defer teardown()
+		g := NewWithT(t)
+		setup(t, g)
+		defer teardown(t, g)
 		ocmMock = mocks.NewMockOCMClient(mockCtrl)
 		expect := func(m *mocks.MockOCMClientMockRecorder) {
 			m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
 				return nil, false, nil
 			}).Times(1)
 			m.CreateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
 				return nodePool, nil
 			}).Times(1)
 		}
 		expect(ocmMock.EXPECT())
 
-		c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rosaMachinePool, ownerCluster, ownerMachinePool, rosaControlPlane, secret).Build()
+		g.Expect(err).NotTo(HaveOccurred())
+
 		stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
 		stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1)
 
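+		// Wire the reconciler to envtest's client; the STS and OCM factories
+		// return the gomock doubles, so no real AWS or OCM endpoint is hit.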
 		r := ROSAMachinePoolReconciler{
 			Recorder:         recorder,
 			WatchFilterValue: "",
 			Endpoints:        []scope.ServiceEndpoint{},
-			Client:           c,
+			Client:           testEnv,
 			NewStsClient:     func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock },
 			NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) {
 				return ocmMock, nil
 			},
 		}
 
 		req := ctrl.Request{}
 		req.NamespacedName = types.NamespacedName{Name: "rosa-machinepool", Namespace: ns.Name}
 
@@ -266,56 +295,231 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
-		result, err := r.Reconcile(ctx, req)
+		m := &expinfrav1.ROSAMachinePool{}
 
-		g.Expect(err).ToNot(HaveOccurred())
+		mpPh, err := patch.NewHelper(rosaControlPlane, testEnv)
+		g.Expect(err).ShouldNot(HaveOccurred())
+		rosaControlPlane.Status.Ready = true
+		g.Expect(mpPh.Patch(ctx, rosaControlPlane)).To(Succeed())
+
+		result, err2 := r.Reconcile(ctx, req)
+		g.Expect(err2).ToNot(HaveOccurred())
 		g.Expect(result).To(Equal(ctrl.Result{}))
+
+		time.Sleep(100 * time.Millisecond)
+
+		key := client.ObjectKey{Name: rosaMachinePool.Name, Namespace: ns.Name}
+		err3 := testEnv.Get(ctx, key, m)
+		g.Expect(err3).ToNot(HaveOccurred())
+		g.Expect(m.Status.ID).To(Equal(rosaMachinePool.Spec.NodePoolName))
 	})
 
-	// t.Run("Reconcile delete", func(t *testing.T) {
-	// 	setup(t)
-	// 	defer teardown()
-
-	// 	deleteTime := metav1.NewTime(time.Now().Add(5 * time.Second))
-	// 	rosaMachinePool.ObjectMeta.Finalizers = []string{"finalizer-rosa"}
-	// 	rosaMachinePool.ObjectMeta.DeletionTimestamp = &deleteTime
-
-	// 	ocmMock := mocks.NewMockOCMClient(mockCtrl)
-	// 	expect := func(m *mocks.MockOCMClientMockRecorder) {
-	// 		m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
-	// 			nodePoolBuilder := nodePoolBuilder(rosaMachinePool.Spec, ownerMachinePool.Spec)
-	// 			nodePool, err := nodePoolBuilder.ID("node-pool-1").Build()
-	// 			g.Expect(err).To(BeNil())
-	// 			return nodePool, true, nil
-	// 		}).Times(1)
-	// 		m.DeleteNodePool("rosa-control-plane1", "node-pool-1").DoAndReturn(func(clusterId string, nodePoolID string) error {
-	// 			return nil
-	// 		}).Times(1)
-	// 	}
-	// 	expect(ocmMock.EXPECT())
-
-	// 	client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rosaMachinePool, ownerCluster, ownerMachinePool, rosaControlPlane, secret).Build()
-	// 	stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
-	// 	stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1)
-
-	// 	r := ROSAMachinePoolReconciler{
-	// 		Recorder:         recorder,
-	// 		WatchFilterValue: "",
-	// 		Endpoints:        []scope.ServiceEndpoint{},
-	// 		Client:           client,
-	// 		NewStsClient:     func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock },
-	// 		NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) {
-	// 			return ocmMock, nil
-	// 		},
-	// 	}
-
-	// 	req := ctrl.Request{}
-	// 	req.NamespacedName = types.NamespacedName{Name: "rosa-machinepool", Namespace: ns.Name}
-
-	// 	result, err := r.Reconcile(ctx, req)
-	// 	g.Expect(err).ToNot(HaveOccurred())
-	// 	g.Expect(result).To(Equal(ctrl.Result{}))
-	// })
+	t.Run("Node pool exists but is not ready", func(t *testing.T) {
+		g := NewWithT(t)
+		setup(t, g)
+		defer teardown(t, g)
+		ocmMock = mocks.NewMockOCMClient(mockCtrl)
+		expect := func(m *mocks.MockOCMClientMockRecorder) {
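+			// The node pool already exists upstream, so reconcile must take the
+			// update path: GetNodePool reports found, UpdateNodePool fires once,
+			// and CreateNodePool must never be called.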
+			m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
+				nodePoolBuilder := nodePoolBuilder(rosaMachinePool.Spec, ownerMachinePool.Spec)
+				nodePool, err := nodePoolBuilder.ID("node-pool-1").Build()
+				g.Expect(err).To(BeNil())
+				return nodePool, true, nil
+			}).Times(1)
+			m.UpdateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
+				return nodePool, nil
+			}).Times(1)
+			m.CreateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
+				return nodePool, nil
+			}).Times(0)
+		}
+		expect(ocmMock.EXPECT())
+
+		g.Expect(err).NotTo(HaveOccurred())
+
+		stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
+		stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1)
+
+		r := ROSAMachinePoolReconciler{
+			Recorder:         recorder,
+			WatchFilterValue: "",
+			Endpoints:        []scope.ServiceEndpoint{},
+			Client:           testEnv,
+			NewStsClient:     func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock },
+			NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) {
+				return ocmMock, nil
+			},
+		}
+
+		req := ctrl.Request{}
+		req.NamespacedName = types.NamespacedName{Name: "rosa-machinepool", Namespace: ns.Name}
+
+		m := &expinfrav1.ROSAMachinePool{}
+
+		mpPh, err := patch.NewHelper(rosaControlPlane, testEnv)
+		g.Expect(err).ShouldNot(HaveOccurred())
+		rosaControlPlane.Status.Ready = true
+		g.Expect(mpPh.Patch(ctx, rosaControlPlane)).To(Succeed())
+
+		result, err2 := r.Reconcile(ctx, req)
+		g.Expect(err2).ToNot(HaveOccurred())
+		g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: time.Second * 60}))
+
+		time.Sleep(100 * time.Millisecond)
+
+		key := client.ObjectKey{Name: rosaMachinePool.Name, Namespace: ns.Name}
+		err3 := testEnv.Get(ctx, key, m)
+		g.Expect(err3).ToNot(HaveOccurred())
+		g.Expect(m.Status.Ready).To(BeFalse())
+		g.Expect(m.Status.Replicas).To(Equal(int32(0)))
+	})
+
+	t.Run("Node pool is ready", func(t *testing.T) {
+		g := NewWithT(t)
+		setup(t, g)
+		defer teardown(t, g)
+		ocmMock = mocks.NewMockOCMClient(mockCtrl)
+		expect := func(m *mocks.MockOCMClientMockRecorder) {
+			m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
+				nodePoolBuilder := nodePoolBuilder(rosaMachinePool.Spec, ownerMachinePool.Spec)
+				statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(1)
+				autoscalingBuilder := (&cmv1.NodePoolAutoscalingBuilder{}).MinReplica(1).MaxReplica(1)
+				nodePool, err := nodePoolBuilder.ID("node-pool-1").Autoscaling(autoscalingBuilder).Replicas(1).Status(statusBuilder).Build()
+				g.Expect(err).NotTo(HaveOccurred())
+
+				return nodePool, true, nil
+			}).Times(1)
+			m.UpdateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
+				statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(1)
+				version := (&cmv1.VersionBuilder{}).RawID("4.14.5")
+				npBuilder := cmv1.NodePoolBuilder{}
+				updatedNodePool, err := npBuilder.Copy(nodePool).Status(statusBuilder).Version(version).Build()
+				g.Expect(err).NotTo(HaveOccurred())
+
+				return updatedNodePool, nil
+			}).Times(1)
+			m.CreateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
+				return nodePool, nil
+			}).Times(0)
+		}
+		expect(ocmMock.EXPECT())
+
+		g.Expect(err).NotTo(HaveOccurred())
+
+		stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
+		stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1)
+
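+		// The mocked node pool reports CurrentReplicas equal to the desired count
+		// and a matching version, so reconcile should mark the pool ready and
+		// return an empty Result (no requeue).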
+		r := ROSAMachinePoolReconciler{
+			Recorder:         recorder,
+			WatchFilterValue: "",
+			Endpoints:        []scope.ServiceEndpoint{},
+			Client:           testEnv,
+			NewStsClient:     func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock },
+			NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) {
+				return ocmMock, nil
+			},
+		}
+
+		req := ctrl.Request{}
+		req.NamespacedName = types.NamespacedName{Name: "rosa-machinepool", Namespace: ns.Name}
+
+		m := &expinfrav1.ROSAMachinePool{}
+
+		mpPh, err := patch.NewHelper(rosaControlPlane, testEnv)
+		g.Expect(err).ShouldNot(HaveOccurred())
+		rosaControlPlane.Status.Ready = true
+		g.Expect(mpPh.Patch(ctx, rosaControlPlane)).To(Succeed())
+
+		result, err2 := r.Reconcile(ctx, req)
+		g.Expect(err2).ToNot(HaveOccurred())
+		g.Expect(result).To(Equal(ctrl.Result{}))
+
+		time.Sleep(100 * time.Millisecond)
+
+		key := client.ObjectKey{Name: rosaMachinePool.Name, Namespace: ns.Name}
+		err3 := testEnv.Get(ctx, key, m)
+		g.Expect(err3).ToNot(HaveOccurred())
+		g.Expect(m.Status.Ready).To(BeTrue())
+		g.Expect(m.Status.Replicas).To(Equal(int32(1)))
+	})
+
+	t.Run("Reconcile delete", func(t *testing.T) {
+		g := NewWithT(t)
+		setup(t, g)
+		defer teardown(t, g)
+
+		mpPh, errPatch := patch.NewHelper(rosaMachinePool, testEnv)
+		g.Expect(errPatch).ShouldNot(HaveOccurred())
+		rosaMachinePool.ObjectMeta.Finalizers = []string{expinfrav1.RosaMachinePoolFinalizer}
+		g.Expect(mpPh.Patch(ctx, rosaMachinePool)).To(Succeed())
+
+		ocmMock = mocks.NewMockOCMClient(mockCtrl)
+		expect := func(m *mocks.MockOCMClientMockRecorder) {
+			m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
+				nodePoolBuilder := nodePoolBuilder(rosaMachinePool.Spec, ownerMachinePool.Spec)
+				nodePool, err := nodePoolBuilder.ID("node-pool-1").Build()
+				g.Expect(err).NotTo(HaveOccurred())
+				return nodePool, true, nil
+			}).Times(1)
+			m.DeleteNodePool("rosa-control-plane-status1", "node-pool-1").DoAndReturn(func(clusterId string, nodePoolID string) error {
+				return nil
+			}).Times(1)
+		}
+		expect(ocmMock.EXPECT())
+
+		stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
+		stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1)
+
+		r := ROSAMachinePoolReconciler{
+			Recorder:         recorder,
+			WatchFilterValue: "",
+			Endpoints:        []scope.ServiceEndpoint{},
+			Client:           testEnv,
+			NewStsClient:     func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock },
+			NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) {
+				return ocmMock, nil
+			},
+		}
+
+		// The status subresource is dropped when the object is created in envtest,
+		// so it has to be set again before building the scopes.
+		rosaControlPlane.Status = rosacontrolplanev1.RosaControlPlaneStatus{
+			Ready: true,
+			ID:    "rosa-control-plane-status1",
+		}
+		log := logger.FromContext(ctx)
+		machinePoolScope, err1 := scope.NewRosaMachinePoolScope(scope.RosaMachinePoolScopeParams{
+			Client:          r.Client,
+			ControllerName:  "rosamachinepool",
+			Cluster:         ownerCluster,
+			ControlPlane:    rosaControlPlane,
+			MachinePool:     ownerMachinePool,
+			RosaMachinePool: rosaMachinePool,
+			Logger:          log,
+			Endpoints:       r.Endpoints,
+		})
+		g.Expect(err1).ToNot(HaveOccurred())
+
+		rosaControlPlaneScope, err2 := scope.NewROSAControlPlaneScope(scope.ROSAControlPlaneScopeParams{
+			Client:         r.Client,
+			Cluster:        ownerCluster,
+			ControlPlane:   rosaControlPlane,
+			ControllerName: "rosaControlPlane",
+			Endpoints:      r.Endpoints,
+			NewStsClient:   r.NewStsClient,
+		})
+		g.Expect(err2).ToNot(HaveOccurred())
+
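+		// Drive deletion directly through reconcileDelete with the hand-built scopes:
+		// it should delete the node pool in OCM and strip the finalizer.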
+		err3 := r.reconcileDelete(ctx, machinePoolScope, rosaControlPlaneScope)
+		g.Expect(err3).ToNot(HaveOccurred())
+
+		g.Expect(machinePoolScope.Close()).To(Succeed())
+
+		m := &expinfrav1.ROSAMachinePool{}
+		key := client.ObjectKey{Name: rosaMachinePool.Name, Namespace: ns.Name}
+		err4 := testEnv.Get(ctx, key, m)
+		g.Expect(err4).ToNot(HaveOccurred())
+		g.Expect(m.Finalizers).To(BeNil())
+	})
 }
 
 func createObject(g *WithT, obj client.Object, namespace string) {
diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go
index 5f7ded08c8..59ff50bb09 100644
--- a/exp/controllers/suite_test.go
+++ b/exp/controllers/suite_test.go
@@ -21,12 +21,14 @@ import (
 	"path"
 	"testing"
 
+	corev1 "k8s.io/api/core/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	// +kubebuilder:scaffold:imports
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -52,6 +54,8 @@ func setup() {
 	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(corev1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme))
 	testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
 		path.Join("config", "crd", "bases"),
 	},
@@ -76,6 +80,15 @@ func setup() {
 	if err := (&expinfrav1.AWSManagedMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
 		panic(fmt.Sprintf("Unable to setup AWSManagedMachinePool webhook: %v", err))
 	}
+	if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
+		panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err))
+	}
+	if err := (&expinfrav1.ROSAMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
+		panic(fmt.Sprintf("Unable to setup ROSAMachinePool webhook: %v", err))
+	}
+	if err := (&rosacontrolplanev1.ROSAControlPlane{}).SetupWebhookWithManager(testEnv); err != nil {
+		panic(fmt.Sprintf("Unable to setup ROSAControlPlane webhook: %v", err))
+	}
 	go func() {
 		fmt.Println("Starting the manager")
 		if err := testEnv.StartManager(ctx); err != nil {
diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go
index 00d480ca3e..95fd916a8a 100644
--- a/pkg/cloud/scope/rosamachinepool.go
+++ b/pkg/cloud/scope/rosamachinepool.go
@@ -76,8 +76,9 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo
 	}
 
 	scope := &RosaMachinePoolScope{
-		Logger: *params.Logger,
-		Client: params.Client,
+		Logger:                     *params.Logger,
+		Client:                     params.Client,
 
 		patchHelper:                ammpHelper,
 		capiMachinePoolPatchHelper: mpHelper,
@@ -227,6 +228,13 @@ func (s *RosaMachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) e
 	)
 }
 
+// PatchRosaMachinePoolObject persists the ROSAMachinePool object and its status via the scope's patch helper.
+func (s *RosaMachinePoolScope) PatchRosaMachinePoolObject(ctx context.Context) error {
+	return s.patchHelper.Patch(
+		ctx,
+		s.RosaMachinePool,
+	)
+}
+
 // Close closes the current scope persisting the control plane configuration and status.
 func (s *RosaMachinePoolScope) Close() error {
 	return s.PatchObject()