From ba511f2e858d05815fb7ab426e48eaf3a9fe5762 Mon Sep 17 00:00:00 2001 From: Furkhat Kasymov Genii Uulu Date: Mon, 29 Apr 2024 08:55:06 +0300 Subject: [PATCH] move types to common package and define interface on consumer's side --- actions/actions.go | 87 ++++---- actions/actions_test.go | 96 +++------ actions/approve_csr_handler.go | 16 +- actions/approve_csr_handler_test.go | 28 +-- actions/chart_rollback_handler.go | 10 +- actions/chart_rollback_handler_test.go | 28 +-- actions/chart_uninstall_handler.go | 10 +- actions/chart_uninstall_handler_test.go | 24 +-- actions/chart_upsert_handler.go | 10 +- actions/chart_upsert_handler_test.go | 22 +- actions/check_node_deleted.go | 12 +- actions/check_node_handler_test.go | 16 +- actions/check_node_status.go | 24 +-- actions/check_node_status_test.go | 65 +++--- actions/create_event_handler.go | 12 +- actions/create_event_handler_test.go | 20 +- actions/create_handler.go | 12 +- actions/create_handler_test.go | 30 +-- actions/delete_handler.go | 8 +- actions/delete_handler_test.go | 34 +-- actions/delete_node_handler.go | 12 +- actions/delete_node_handler_test.go | 25 ++- actions/disconnect_cluster_handler.go | 9 +- actions/disconnect_cluster_handler_test.go | 8 +- actions/drain_node_handler.go | 12 +- actions/drain_node_handler_test.go | 38 ++-- actions/mock/client.go | 79 +++++++ actions/patch_handler.go | 8 +- actions/patch_handler_test.go | 22 +- actions/patch_node_handler.go | 12 +- actions/patch_node_handler_test.go | 22 +- actions/send_aks_init_data_handler.go | 15 +- actions/send_aks_init_data_handler_test.go | 29 ++- castai/client.go | 25 +-- castai/mock/client.go | 62 ------ castai/types.go | 238 +-------------------- helm/chart_loader.go | 6 +- helm/chart_loader_test.go | 4 +- helm/client.go | 6 +- helm/client_test.go | 4 +- helm/mock/chart_loader.go | 4 +- main.go | 2 +- types/types.go | 220 +++++++++++++++++++ version/mock/version.go | 2 +- 44 files changed, 708 insertions(+), 720 deletions(-) create mode 
100644 actions/mock/client.go delete mode 100644 castai/mock/client.go create mode 100644 types/types.go diff --git a/actions/actions.go b/actions/actions.go index 1af65195..dd5397dd 100644 --- a/actions/actions.go +++ b/actions/actions.go @@ -1,3 +1,6 @@ +// Package actions polls, handles and acknowledges actions from mothership for a given cluster. +// +//go:generate mockgen -destination ./mock/client.go . Client package actions import ( @@ -14,9 +17,9 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/health" "github.com/castai/cluster-controller/helm" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -24,12 +27,14 @@ const ( // actionIDLogField is the log field name for action ID. // This field is used in backend to detect actions ID in logs. actionIDLogField = "id" + labelNodeID = "provisioner.cast.ai/node-id" ) func newUnexpectedTypeErr(value interface{}, expectedType interface{}) error { return fmt.Errorf("unexpected type %T, expected %T", value, expectedType) } +// Config contains parameters to modify actions handling frequency and values required to poll/ack actions. type Config struct { PollWaitInterval time.Duration // How long to wait unit next long polling request. PollTimeout time.Duration // hard timeout. Normally server should return empty result before this timeout. @@ -41,59 +46,64 @@ type Config struct { Namespace string } -type Service interface { - Run(ctx context.Context) +// Client abstracts communication means. 
+type Client interface { + GetActions(ctx context.Context, k8sVersion string) ([]*types.ClusterAction, error) + AckAction(ctx context.Context, actionID string, errMessage *string) error + SendAKSInitData(ctx context.Context, cloudConfigBase64, protectedSettingsBase64, architecture string) error } -type ActionHandler interface { - Handle(ctx context.Context, action *castai.ClusterAction) error +type actionHandler interface { + Handle(ctx context.Context, action *types.ClusterAction) error } +// NewService returns new Service that can continuously handle actions once started. func NewService( log logrus.FieldLogger, cfg Config, k8sVersion string, clientset *kubernetes.Clientset, dynamicClient dynamic.Interface, - castaiClient castai.ActionsClient, + castaiClient Client, helmClient helm.Client, healthCheck *health.HealthzProvider, -) Service { - return &service{ +) *Service { + return &Service{ log: log, cfg: cfg, k8sVersion: k8sVersion, castAIClient: castaiClient, startedActions: map[string]struct{}{}, - actionHandlers: map[reflect.Type]ActionHandler{ - reflect.TypeOf(&castai.ActionDeleteNode{}): newDeleteNodeHandler(log, clientset), - reflect.TypeOf(&castai.ActionDrainNode{}): newDrainNodeHandler(log, clientset, cfg.Namespace), - reflect.TypeOf(&castai.ActionPatchNode{}): newPatchNodeHandler(log, clientset), - reflect.TypeOf(&castai.ActionCreateEvent{}): newCreateEventHandler(log, clientset), - reflect.TypeOf(&castai.ActionApproveCSR{}): newApproveCSRHandler(log, clientset), - reflect.TypeOf(&castai.ActionChartUpsert{}): newChartUpsertHandler(log, helmClient), - reflect.TypeOf(&castai.ActionChartUninstall{}): newChartUninstallHandler(log, helmClient), - reflect.TypeOf(&castai.ActionChartRollback{}): newChartRollbackHandler(log, helmClient, cfg.Version), - reflect.TypeOf(&castai.ActionDisconnectCluster{}): newDisconnectClusterHandler(log, clientset), - reflect.TypeOf(&castai.ActionSendAKSInitData{}): newSendAKSInitDataHandler(log, castaiClient), - 
reflect.TypeOf(&castai.ActionCheckNodeDeleted{}): newCheckNodeDeletedHandler(log, clientset), - reflect.TypeOf(&castai.ActionCheckNodeStatus{}): newCheckNodeStatusHandler(log, clientset), - reflect.TypeOf(&castai.ActionPatch{}): newPatchHandler(log, dynamicClient), - reflect.TypeOf(&castai.ActionCreate{}): newCreateHandler(log, dynamicClient), - reflect.TypeOf(&castai.ActionDelete{}): newDeleteHandler(log, dynamicClient), + actionHandlers: map[reflect.Type]actionHandler{ + reflect.TypeOf(&types.ActionDeleteNode{}): newDeleteNodeHandler(log, clientset), + reflect.TypeOf(&types.ActionDrainNode{}): newDrainNodeHandler(log, clientset, cfg.Namespace), + reflect.TypeOf(&types.ActionPatchNode{}): newPatchNodeHandler(log, clientset), + reflect.TypeOf(&types.ActionCreateEvent{}): newCreateEventHandler(log, clientset), + reflect.TypeOf(&types.ActionApproveCSR{}): newApproveCSRHandler(log, clientset), + reflect.TypeOf(&types.ActionChartUpsert{}): newChartUpsertHandler(log, helmClient), + reflect.TypeOf(&types.ActionChartUninstall{}): newChartUninstallHandler(log, helmClient), + reflect.TypeOf(&types.ActionChartRollback{}): newChartRollbackHandler(log, helmClient, cfg.Version), + reflect.TypeOf(&types.ActionDisconnectCluster{}): newDisconnectClusterHandler(log, clientset), + reflect.TypeOf(&types.ActionSendAKSInitData{}): newSendAKSInitDataHandler(log, castaiClient), + reflect.TypeOf(&types.ActionCheckNodeDeleted{}): newCheckNodeDeletedHandler(log, clientset), + reflect.TypeOf(&types.ActionCheckNodeStatus{}): newCheckNodeStatusHandler(log, clientset), + reflect.TypeOf(&types.ActionPatch{}): newPatchHandler(log, dynamicClient), + reflect.TypeOf(&types.ActionCreate{}): newCreateHandler(log, dynamicClient), + reflect.TypeOf(&types.ActionDelete{}): newDeleteHandler(log, dynamicClient), }, healthCheck: healthCheck, } } -type service struct { +// Service can continuously poll and handle actions. 
+type Service struct { log logrus.FieldLogger cfg Config - castAIClient castai.ActionsClient + castAIClient Client k8sVersion string - actionHandlers map[reflect.Type]ActionHandler + actionHandlers map[reflect.Type]actionHandler startedActionsWg sync.WaitGroup startedActions map[string]struct{} @@ -101,7 +111,8 @@ type service struct { healthCheck *health.HealthzProvider } -func (s *service) Run(ctx context.Context) { +// Run starts polling and handling actions. +func (s *Service) Run(ctx context.Context) { s.healthCheck.Initializing() for { select { @@ -123,11 +134,11 @@ func (s *service) Run(ctx context.Context) { } } -func (s *service) doWork(ctx context.Context) error { +func (s *Service) doWork(ctx context.Context) error { s.log.Info("polling actions") start := time.Now() var ( - actions []*castai.ClusterAction + actions []*types.ClusterAction err error iteration int ) @@ -161,13 +172,13 @@ func (s *service) doWork(ctx context.Context) error { return nil } -func (s *service) handleActions(ctx context.Context, actions []*castai.ClusterAction) { +func (s *Service) handleActions(ctx context.Context, actions []*types.ClusterAction) { for _, action := range actions { if !s.startProcessing(action.ID) { continue } - go func(action *castai.ClusterAction) { + go func(action *types.ClusterAction) { defer s.finishProcessing(action.ID) var err error @@ -193,7 +204,7 @@ func (s *service) handleActions(ctx context.Context, actions []*castai.ClusterAc } } -func (s *service) finishProcessing(actionID string) { +func (s *Service) finishProcessing(actionID string) { s.startedActionsMu.Lock() defer s.startedActionsMu.Unlock() @@ -201,7 +212,7 @@ func (s *service) finishProcessing(actionID string) { delete(s.startedActions, actionID) } -func (s *service) startProcessing(actionID string) bool { +func (s *Service) startProcessing(actionID string) bool { s.startedActionsMu.Lock() defer s.startedActionsMu.Unlock() @@ -214,7 +225,7 @@ func (s *service) startProcessing(actionID string) 
bool { return true } -func (s *service) handleAction(ctx context.Context, action *castai.ClusterAction) (err error) { +func (s *Service) handleAction(ctx context.Context, action *types.ClusterAction) (err error) { actionType := reflect.TypeOf(action.Data()) defer func() { @@ -238,7 +249,7 @@ func (s *service) handleAction(ctx context.Context, action *castai.ClusterAction return nil } -func (s *service) ackAction(ctx context.Context, action *castai.ClusterAction, handleErr error) error { +func (s *Service) ackAction(ctx context.Context, action *types.ClusterAction, handleErr error) error { actionType := reflect.TypeOf(action.Data()) s.log.WithFields(logrus.Fields{ actionIDLogField: action.ID, @@ -250,9 +261,7 @@ func (s *service) ackAction(ctx context.Context, action *castai.ClusterAction, h return waitext.Retry(ctx, boff, s.cfg.AckRetriesCount, func(ctx context.Context) (bool, error) { ctx, cancel := context.WithTimeout(ctx, s.cfg.AckTimeout) defer cancel() - return true, s.castAIClient.AckAction(ctx, action.ID, &castai.AckClusterActionRequest{ - Error: getHandlerError(handleErr), - }) + return true, s.castAIClient.AckAction(ctx, action.ID, getHandlerError(handleErr)) }, func(err error) { s.log.Debugf("ack failed, will retry: %v", err) }) diff --git a/actions/actions_test.go b/actions/actions_test.go index 48f44664..c8c38359 100644 --- a/actions/actions_test.go +++ b/actions/actions_test.go @@ -3,18 +3,17 @@ package actions import ( "context" "errors" - "sort" "testing" "time" + "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" "go.uber.org/goleak" - "github.com/castai/cluster-controller/castai" - "github.com/castai/cluster-controller/castai/mock" + mock_actions "github.com/castai/cluster-controller/actions/mock" "github.com/castai/cluster-controller/health" + "github.com/castai/cluster-controller/types" ) func TestMain(m *testing.M) { @@ -33,7 +32,7 @@ func TestActions(t *testing.T) { 
ClusterID: uuid.New().String(), } - newTestService := func(handler ActionHandler, client castai.ActionsClient) *service { + newTestService := func(handler actionHandler, client Client) *Service { svc := NewService( log, cfg, @@ -43,7 +42,7 @@ func TestActions(t *testing.T) { client, nil, health.NewHealthzProvider(health.HealthzCfg{HealthyPollIntervalLimit: cfg.PollTimeout}, log), - ).(*service) + ) handlers := svc.actionHandlers // Patch handlers with a mock one. for k := range handlers { @@ -53,32 +52,34 @@ func TestActions(t *testing.T) { } t.Run("poll handle and ack", func(t *testing.T) { - r := require.New(t) - - apiActions := []*castai.ClusterAction{ + apiActions := []*types.ClusterAction{ { ID: "a1", CreatedAt: time.Now(), - ActionDeleteNode: &castai.ActionDeleteNode{ + ActionDeleteNode: &types.ActionDeleteNode{ NodeName: "n1", }, }, { ID: "a2", CreatedAt: time.Now(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "n1", }, }, { ID: "a3", CreatedAt: time.Now(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "n1", }, }, } - client := mock.NewMockAPIClient(apiActions) + client := mock_actions.NewMockClient(gomock.NewController(t)) + client.EXPECT().GetActions(gomock.Any(), gomock.Any()).Return(apiActions, nil) + client.EXPECT().AckAction(gomock.Any(), "a1", nil).Return(nil) + client.EXPECT().AckAction(gomock.Any(), "a2", nil).Return(nil) + client.EXPECT().AckAction(gomock.Any(), "a3", nil).Return(nil) handler := &mockAgentActionHandler{handleDelay: 2 * time.Millisecond} svc := newTestService(handler, client) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) @@ -86,116 +87,89 @@ func TestActions(t *testing.T) { cancel() svc.startedActionsWg.Wait() - r.Len(client.Acks, 3) - ids := make([]string, len(client.Acks)) - for i, ack := range client.Acks { - ids[i] = ack.ActionID - } - sort.Strings(ids) - r.Equal("a1", ids[0]) - r.Equal("a2", 
ids[1]) - r.Equal("a3", ids[2]) }() - svc.Run(ctx) + _ = svc.doWork(ctx) }) t.Run("continue polling on api error", func(t *testing.T) { - r := require.New(t) - - client := mock.NewMockAPIClient([]*castai.ClusterAction{}) - client.GetActionsErr = errors.New("ups") + client := mock_actions.NewMockClient(gomock.NewController(t)) + client.EXPECT().GetActions(gomock.Any(), gomock.Any()).Return(nil, errors.New("ups")) handler := &mockAgentActionHandler{err: errors.New("ups")} svc := newTestService(handler, client) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer func() { cancel() svc.startedActionsWg.Wait() - - r.Len(client.Acks, 0) }() - svc.Run(ctx) + _ = svc.doWork(ctx) }) t.Run("do not ack action on context canceled error", func(t *testing.T) { - r := require.New(t) - - apiActions := []*castai.ClusterAction{ + apiActions := []*types.ClusterAction{ { ID: "a1", CreatedAt: time.Now(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "n1", }, }, } - client := mock.NewMockAPIClient(apiActions) + client := mock_actions.NewMockClient(gomock.NewController(t)) + client.EXPECT().GetActions(gomock.Any(), gomock.Any()).Return(apiActions, nil) handler := &mockAgentActionHandler{err: context.Canceled} svc := newTestService(handler, client) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer func() { cancel() svc.startedActionsWg.Wait() - - r.NotEmpty(client.Actions) - r.Len(client.Acks, 0) }() - svc.Run(ctx) + _ = svc.doWork(ctx) }) t.Run("ack with error when action handler failed", func(t *testing.T) { - r := require.New(t) - - apiActions := []*castai.ClusterAction{ + apiActions := []*types.ClusterAction{ { ID: "a1", CreatedAt: time.Now(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "n1", }, }, } - client := mock.NewMockAPIClient(apiActions) + client := mock_actions.NewMockClient(gomock.NewController(t)) + 
client.EXPECT().GetActions(gomock.Any(), gomock.Any()).Return(apiActions, nil) + client.EXPECT().AckAction(gomock.Any(), "a1", gomock.Not(gomock.Nil())).Return(nil) handler := &mockAgentActionHandler{err: errors.New("ups")} svc := newTestService(handler, client) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer func() { cancel() svc.startedActionsWg.Wait() - - r.Empty(client.Actions) - r.Len(client.Acks, 1) - r.Equal("a1", client.Acks[0].ActionID) - r.Equal("handling action *castai.ActionPatchNode: ups", *client.Acks[0].Err) }() svc.Run(ctx) }) t.Run("ack with error when action handler panic occurred", func(t *testing.T) { - r := require.New(t) - - apiActions := []*castai.ClusterAction{ + apiActions := []*types.ClusterAction{ { ID: "a1", CreatedAt: time.Now(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "n1", }, }, } - client := mock.NewMockAPIClient(apiActions) + client := mock_actions.NewMockClient(gomock.NewController(t)) + client.EXPECT().GetActions(gomock.Any(), gomock.Any()).Return(apiActions, nil) + client.EXPECT().AckAction(gomock.Any(), "a1", gomock.Not(gomock.Nil())).Return(nil) handler := &mockAgentActionHandler{panicErr: errors.New("ups")} svc := newTestService(handler, client) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer func() { cancel() svc.startedActionsWg.Wait() - - r.Empty(client.Actions) - r.Len(client.Acks, 1) - r.Equal("a1", client.Acks[0].ActionID) - r.Contains(*client.Acks[0].Err, "panic: handling action *castai.ActionPatchNode: ups: goroutine") }() - svc.Run(ctx) + _ = svc.doWork(ctx) }) } @@ -205,7 +179,7 @@ type mockAgentActionHandler struct { handleDelay time.Duration } -func (m *mockAgentActionHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { +func (m *mockAgentActionHandler) Handle(_ context.Context, _ *types.ClusterAction) error { time.Sleep(m.handleDelay) if m.panicErr != nil { 
panic(m.panicErr) diff --git a/actions/approve_csr_handler.go b/actions/approve_csr_handler.go index eddeaf9b..411ca6d1 100644 --- a/actions/approve_csr_handler.go +++ b/actions/approve_csr_handler.go @@ -11,8 +11,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/csr" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -20,7 +20,7 @@ const ( approveCSRTimeout = 4 * time.Minute ) -func newApproveCSRHandler(log logrus.FieldLogger, clientset kubernetes.Interface) ActionHandler { +func newApproveCSRHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &approveCSRHandler{ log: log, clientset: clientset, @@ -36,15 +36,15 @@ type approveCSRHandler struct { csrFetchInterval time.Duration } -func (h *approveCSRHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionApproveCSR) +func (h *approveCSRHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionApproveCSR) if !ok { return fmt.Errorf("unexpected type %T for approve csr handler", action.Data()) } log := h.log.WithFields(logrus.Fields{ "node_name": req.NodeName, "node_id": req.NodeID, - "type": reflect.TypeOf(action.Data().(*castai.ActionApproveCSR)).String(), + "type": reflect.TypeOf(action.Data().(*types.ActionApproveCSR)).String(), actionIDLogField: action.ID, }) @@ -67,7 +67,7 @@ func (h *approveCSRHandler) Handle(ctx context.Context, action *castai.ClusterAc b, waitext.Forever, func(ctx context.Context) (bool, error) { - return true, h.handle(ctx, log, cert) + return true, h.handleCert(ctx, log, cert) }, func(err error) { log.Warnf("csr approval failed, will retry: %v", err) @@ -75,7 +75,7 @@ func (h *approveCSRHandler) Handle(ctx context.Context, action *castai.ClusterAc ) } -func (h *approveCSRHandler) 
handle(ctx context.Context, log logrus.FieldLogger, cert *csr.Certificate) (reterr error) { +func (h *approveCSRHandler) handleCert(ctx context.Context, log logrus.FieldLogger, cert *csr.Certificate) (reterr error) { // Since this new csr may be denied we need to delete it. log.Debug("deleting old csr") if err := csr.DeleteCertificate(ctx, h.clientset, cert); err != nil { @@ -136,7 +136,7 @@ func (h *approveCSRHandler) getInitialNodeCSR(ctx context.Context, log logrus.Fi ctx, b, 3, - func(ctx context.Context) (bool, error) { + func(_ context.Context) (bool, error) { cert, err = poll() if errors.Is(err, context.DeadlineExceeded) { return false, err diff --git a/actions/approve_csr_handler_test.go b/actions/approve_csr_handler_test.go index 6d54daa5..e2f9eae2 100644 --- a/actions/approve_csr_handler_test.go +++ b/actions/approve_csr_handler_test.go @@ -22,8 +22,8 @@ import ( "k8s.io/client-go/kubernetes/fake" ktest "k8s.io/client-go/testing" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/csr" + "github.com/castai/cluster-controller/types" ) func TestApproveCSRHandler(t *testing.T) { @@ -37,7 +37,7 @@ func TestApproveCSRHandler(t *testing.T) { client := fake.NewSimpleClientset(csrRes) var approveCalls int32 - client.PrependReactor("update", "certificatesigningrequests", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + client.PrependReactor("update", "certificatesigningrequests", func(_ ktest.Action) (handled bool, ret runtime.Object, err error) { approved := csrRes.DeepCopy() approved.Status.Conditions = []certv1.CertificateSigningRequestCondition{ { @@ -57,9 +57,9 @@ func TestApproveCSRHandler(t *testing.T) { return true, approved, nil }) - actionApproveCSR := &castai.ClusterAction{ + actionApproveCSR := &types.ClusterAction{ ID: uuid.New().String(), - ActionApproveCSR: &castai.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, + ActionApproveCSR: &types.ActionApproveCSR{NodeName: 
"gke-am-gcp-cast-5dc4f4ec"}, CreatedAt: time.Time{}, } @@ -90,9 +90,9 @@ func TestApproveCSRHandler(t *testing.T) { } client := fake.NewSimpleClientset(csrRes) - actionApproveCSR := &castai.ClusterAction{ + actionApproveCSR := &types.ClusterAction{ ID: uuid.New().String(), - ActionApproveCSR: &castai.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, + ActionApproveCSR: &types.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, CreatedAt: time.Time{}, } h := &approveCSRHandler{ @@ -112,7 +112,7 @@ func TestApproveCSRHandler(t *testing.T) { csrRes := getCSR() count := 0 - fn := ktest.ReactionFunc(func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + fn := ktest.ReactionFunc(func(_ ktest.Action) (handled bool, ret runtime.Object, err error) { if count == 0 { count++ return true, nil, errors.New("api server timeout") @@ -123,9 +123,9 @@ func TestApproveCSRHandler(t *testing.T) { client := fake.NewSimpleClientset(csrRes) client.PrependReactor("list", "certificatesigningrequests", fn) - actionApproveCSR := &castai.ClusterAction{ + actionApproveCSR := &types.ClusterAction{ ID: uuid.New().String(), - ActionApproveCSR: &castai.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, + ActionApproveCSR: &types.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, CreatedAt: time.Time{}, } h := &approveCSRHandler{ @@ -168,7 +168,7 @@ AiAHVYZXHxxspoV0hcfn2Pdsl89fIPCOFy/K1PqSUR6QNAIgYdt51ZbQt9rgM2BD } return }) - client.PrependReactor("update", "certificatesigningrequests", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + client.PrependReactor("update", "certificatesigningrequests", func(_ ktest.Action) (handled bool, ret runtime.Object, err error) { approved := csrRes.DeepCopy() approved.Status.Conditions = []certv1beta1.CertificateSigningRequestCondition{ { @@ -182,9 +182,9 @@ AiAHVYZXHxxspoV0hcfn2Pdsl89fIPCOFy/K1PqSUR6QNAIgYdt51ZbQt9rgM2BD return true, approved, nil }) - actionApproveCSR := &castai.ClusterAction{ + 
actionApproveCSR := &types.ClusterAction{ ID: uuid.New().String(), - ActionApproveCSR: &castai.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, + ActionApproveCSR: &types.ActionApproveCSR{NodeName: "gke-am-gcp-cast-5dc4f4ec"}, CreatedAt: time.Time{}, } h := &approveCSRHandler{ @@ -214,9 +214,9 @@ AiAHVYZXHxxspoV0hcfn2Pdsl89fIPCOFy/K1PqSUR6QNAIgYdt51ZbQt9rgM2BD watcher.Stop() client.PrependWatchReactor("certificatesigningrequests", ktest.DefaultWatchReactor(watcher, nil)) - actionApproveCSR := &castai.ClusterAction{ + actionApproveCSR := &types.ClusterAction{ ID: uuid.New().String(), - ActionApproveCSR: &castai.ActionApproveCSR{NodeName: "node"}, + ActionApproveCSR: &types.ActionApproveCSR{NodeName: "node"}, CreatedAt: time.Time{}, } diff --git a/actions/chart_rollback_handler.go b/actions/chart_rollback_handler.go index 17c0060c..ddc3e4ce 100644 --- a/actions/chart_rollback_handler.go +++ b/actions/chart_rollback_handler.go @@ -7,11 +7,11 @@ import ( "github.com/sirupsen/logrus" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" + "github.com/castai/cluster-controller/types" ) -func newChartRollbackHandler(log logrus.FieldLogger, helm helm.Client, version string) ActionHandler { +func newChartRollbackHandler(log logrus.FieldLogger, helm helm.Client, version string) actionHandler { return &chartRollbackHandler{ log: log, helm: helm, @@ -25,8 +25,8 @@ type chartRollbackHandler struct { version string } -func (c *chartRollbackHandler) Handle(_ context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionChartRollback) +func (c *chartRollbackHandler) Handle(_ context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionChartRollback) if !ok { return fmt.Errorf("unexpected type %T for chart rollback handler", action.Data()) } @@ -46,7 +46,7 @@ func (c *chartRollbackHandler) Handle(_ context.Context, action *castai.ClusterA }) } -func (c 
*chartRollbackHandler) validateRequest(req *castai.ActionChartRollback) error { +func (c *chartRollbackHandler) validateRequest(req *types.ActionChartRollback) error { if req.ReleaseName == "" { return errors.New("bad request: releaseName not provided") } diff --git a/actions/chart_rollback_handler_test.go b/actions/chart_rollback_handler_test.go index 8989ce70..6ea25e10 100644 --- a/actions/chart_rollback_handler_test.go +++ b/actions/chart_rollback_handler_test.go @@ -3,16 +3,16 @@ package actions import ( "context" "fmt" - "github.com/google/uuid" "testing" "github.com/golang/mock/gomock" + "github.com/google/uuid" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" mock_helm "github.com/castai/cluster-controller/helm/mock" + "github.com/castai/cluster-controller/types" ) func TestChartRollbackHandler(t *testing.T) { @@ -23,8 +23,8 @@ func TestChartRollbackHandler(t *testing.T) { handler := newChartRollbackHandler(logrus.New(), helmMock, "v0.20.0") - t.Run("successfully rollback chart", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("successfully rollback chart", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartRollback: newRollbackAction(), } @@ -37,8 +37,8 @@ func TestChartRollbackHandler(t *testing.T) { r.NoError(handler.Handle(ctx, action)) }) - t.Run("skip rollback if version mismatch", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("skip rollback if version mismatch", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartRollback: newRollbackAction(), } @@ -46,8 +46,8 @@ func TestChartRollbackHandler(t *testing.T) { r.NoError(handler.Handle(ctx, action)) }) - t.Run("error when rolling back chart", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("error when rolling back chart", func(_ *testing.T) { + action := 
&types.ClusterAction{ ID: uuid.New().String(), ActionChartRollback: newRollbackAction(), } @@ -60,8 +60,8 @@ func TestChartRollbackHandler(t *testing.T) { r.Error(handler.Handle(ctx, action), someError) }) - t.Run("namespace is missing in rollback action", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("namespace is missing in rollback action", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartRollback: newRollbackAction(), } @@ -70,8 +70,8 @@ func TestChartRollbackHandler(t *testing.T) { r.Error(handler.Handle(ctx, action)) }) - t.Run("helm release is missing in rollback action", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("helm release is missing in rollback action", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartRollback: newRollbackAction(), } @@ -81,8 +81,8 @@ func TestChartRollbackHandler(t *testing.T) { }) } -func newRollbackAction() *castai.ActionChartRollback { - return &castai.ActionChartRollback{ +func newRollbackAction() *types.ActionChartRollback { + return &types.ActionChartRollback{ Namespace: "test", ReleaseName: "new-release", Version: "v0.20.0", diff --git a/actions/chart_uninstall_handler.go b/actions/chart_uninstall_handler.go index 3692da05..43256cce 100644 --- a/actions/chart_uninstall_handler.go +++ b/actions/chart_uninstall_handler.go @@ -7,11 +7,11 @@ import ( "github.com/sirupsen/logrus" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" + "github.com/castai/cluster-controller/types" ) -func newChartUninstallHandler(log logrus.FieldLogger, helm helm.Client) ActionHandler { +func newChartUninstallHandler(log logrus.FieldLogger, helm helm.Client) actionHandler { return &chartUninstallHandler{ log: log, helm: helm, @@ -23,8 +23,8 @@ type chartUninstallHandler struct { helm helm.Client } -func (c *chartUninstallHandler) Handle(_ context.Context, action *castai.ClusterAction) error { 
- req, ok := action.Data().(*castai.ActionChartUninstall) +func (c *chartUninstallHandler) Handle(_ context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionChartUninstall) if !ok { return fmt.Errorf("unexpected type %T for upsert uninstall handler", action.Data()) } @@ -39,7 +39,7 @@ func (c *chartUninstallHandler) Handle(_ context.Context, action *castai.Cluster return err } -func (c *chartUninstallHandler) validateRequest(req *castai.ActionChartUninstall) error { +func (c *chartUninstallHandler) validateRequest(req *types.ActionChartUninstall) error { if req.ReleaseName == "" { return errors.New("bad request: releaseName not provided") } diff --git a/actions/chart_uninstall_handler_test.go b/actions/chart_uninstall_handler_test.go index 7985dfae..c2a057c7 100644 --- a/actions/chart_uninstall_handler_test.go +++ b/actions/chart_uninstall_handler_test.go @@ -3,16 +3,16 @@ package actions import ( "context" "fmt" - "github.com/google/uuid" "testing" "github.com/golang/mock/gomock" + "github.com/google/uuid" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" mock_helm "github.com/castai/cluster-controller/helm/mock" + "github.com/castai/cluster-controller/types" ) func TestChartUninstallHandler(t *testing.T) { @@ -23,8 +23,8 @@ func TestChartUninstallHandler(t *testing.T) { handler := newChartUninstallHandler(logrus.New(), helmMock) - t.Run("successfully uninstall chart", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("successfully uninstall chart", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUninstall: newUninstallAction(), } @@ -37,8 +37,8 @@ func TestChartUninstallHandler(t *testing.T) { r.NoError(handler.Handle(ctx, action)) }) - t.Run("error when uninstalling chart", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("error when uninstalling 
chart", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUninstall: newUninstallAction(), } @@ -52,8 +52,8 @@ func TestChartUninstallHandler(t *testing.T) { r.Error(handler.Handle(ctx, action), someError) }) - t.Run("namespace is missing in uninstall action", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("namespace is missing in uninstall action", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUninstall: newUninstallAction(), } @@ -62,8 +62,8 @@ func TestChartUninstallHandler(t *testing.T) { r.Error(handler.Handle(ctx, action)) }) - t.Run("helm release is missing in uninstall action", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("helm release is missing in uninstall action", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUninstall: newUninstallAction(), } @@ -73,8 +73,8 @@ func TestChartUninstallHandler(t *testing.T) { }) } -func newUninstallAction() *castai.ActionChartUninstall { - return &castai.ActionChartUninstall{ +func newUninstallAction() *types.ActionChartUninstall { + return &types.ActionChartUninstall{ Namespace: "test", ReleaseName: "new-release", } diff --git a/actions/chart_upsert_handler.go b/actions/chart_upsert_handler.go index dedbb8bb..3de2a31c 100644 --- a/actions/chart_upsert_handler.go +++ b/actions/chart_upsert_handler.go @@ -9,11 +9,11 @@ import ( "helm.sh/helm/v3/pkg/release" helmdriver "helm.sh/helm/v3/pkg/storage/driver" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" + "github.com/castai/cluster-controller/types" ) -func newChartUpsertHandler(log logrus.FieldLogger, helm helm.Client) ActionHandler { +func newChartUpsertHandler(log logrus.FieldLogger, helm helm.Client) actionHandler { return &chartUpsertHandler{ log: log, helm: helm, @@ -25,8 +25,8 @@ type chartUpsertHandler struct { helm helm.Client } -func (c *chartUpsertHandler) 
Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionChartUpsert) +func (c *chartUpsertHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionChartUpsert) if !ok { return fmt.Errorf("unexpected type %T for upsert chart handler", action.Data()) } @@ -74,7 +74,7 @@ func (c *chartUpsertHandler) Handle(ctx context.Context, action *castai.ClusterA return err } -func (c *chartUpsertHandler) validateRequest(req *castai.ActionChartUpsert) error { +func (c *chartUpsertHandler) validateRequest(req *types.ActionChartUpsert) error { if req.ReleaseName == "" { return errors.New("bad request: releaseName not provided") } diff --git a/actions/chart_upsert_handler_test.go b/actions/chart_upsert_handler_test.go index d4090cc9..65144e29 100644 --- a/actions/chart_upsert_handler_test.go +++ b/actions/chart_upsert_handler_test.go @@ -2,18 +2,18 @@ package actions import ( "context" - "github.com/google/uuid" "testing" "github.com/golang/mock/gomock" + "github.com/google/uuid" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "helm.sh/helm/v3/pkg/release" helmdriver "helm.sh/helm/v3/pkg/storage/driver" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm" mock_helm "github.com/castai/cluster-controller/helm/mock" + "github.com/castai/cluster-controller/types" ) func TestChartUpsertHandler(t *testing.T) { @@ -24,8 +24,8 @@ func TestChartUpsertHandler(t *testing.T) { handler := newChartUpsertHandler(logrus.New(), helmMock) - t.Run("install chart given release is not found", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("install chart given release is not found", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUpsert: chartUpsertAction(), } @@ -45,8 +45,8 @@ func TestChartUpsertHandler(t *testing.T) { r.NoError(handler.Handle(ctx, action)) }) - t.Run("upgrade 
chart given release is found", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("upgrade chart given release is found", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUpsert: chartUpsertAction(), } @@ -75,8 +75,8 @@ func TestChartUpsertHandler(t *testing.T) { r.NoError(handler.Handle(ctx, action)) }) - t.Run("rollback previous release before upgrade", func(t *testing.T) { - action := &castai.ClusterAction{ + t.Run("rollback previous release before upgrade", func(_ *testing.T) { + action := &types.ClusterAction{ ID: uuid.New().String(), ActionChartUpsert: chartUpsertAction(), } @@ -103,12 +103,12 @@ func TestChartUpsertHandler(t *testing.T) { }) } -func chartUpsertAction() *castai.ActionChartUpsert { - return &castai.ActionChartUpsert{ +func chartUpsertAction() *types.ActionChartUpsert { + return &types.ActionChartUpsert{ Namespace: "test", ReleaseName: "new-release", ValuesOverrides: map[string]string{"image.tag": "1.0.0"}, - ChartSource: castai.ChartSource{ + ChartSource: types.ChartSource{ RepoURL: "https://my-charts.repo", Name: "super-chart", Version: "1.5.0", diff --git a/actions/check_node_deleted.go b/actions/check_node_deleted.go index c12cb4e5..86ceb643 100644 --- a/actions/check_node_deleted.go +++ b/actions/check_node_deleted.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -21,7 +21,7 @@ type checkNodeDeletedConfig struct { retryWait time.Duration } -func newCheckNodeDeletedHandler(log logrus.FieldLogger, clientset kubernetes.Interface) ActionHandler { +func newCheckNodeDeletedHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &checkNodeDeletedHandler{ log: log, clientset: clientset, @@ -38,8 +38,8 @@ type checkNodeDeletedHandler struct { cfg 
checkNodeDeletedConfig } -func (h *checkNodeDeletedHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionCheckNodeDeleted) +func (h *checkNodeDeletedHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionCheckNodeDeleted) if !ok { return fmt.Errorf("unexpected type %T for check node deleted handler", action.Data()) } @@ -47,7 +47,7 @@ func (h *checkNodeDeletedHandler) Handle(ctx context.Context, action *castai.Clu log := h.log.WithFields(logrus.Fields{ "node_name": req.NodeName, "node_id": req.NodeID, - "type": reflect.TypeOf(action.Data().(*castai.ActionCheckNodeDeleted)).String(), + "type": reflect.TypeOf(action.Data().(*types.ActionCheckNodeDeleted)).String(), actionIDLogField: action.ID, }) log.Info("checking if node is deleted") @@ -68,7 +68,7 @@ func (h *checkNodeDeletedHandler) Handle(ctx context.Context, action *castai.Clu return false, nil } - currentNodeID, ok := n.Labels[castai.LabelNodeID] + currentNodeID, ok := n.Labels[labelNodeID] if !ok { log.Info("node doesn't have castai node id label") } diff --git a/actions/check_node_handler_test.go b/actions/check_node_handler_test.go index 2cd06505..ee324f2f 100644 --- a/actions/check_node_handler_test.go +++ b/actions/check_node_handler_test.go @@ -2,16 +2,16 @@ package actions import ( "context" - "github.com/google/uuid" "testing" + "github.com/google/uuid" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestCheckNodeDeletedHandler(t *testing.T) { @@ -20,7 +20,7 @@ func TestCheckNodeDeletedHandler(t *testing.T) { log := logrus.New() log.SetLevel(logrus.DebugLevel) - t.Run("return error when node is not deleted", func(t *testing.T) { + t.Run("return error when node 
is not deleted", func(_ *testing.T) { nodeName := "node1" node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -35,16 +35,16 @@ func TestCheckNodeDeletedHandler(t *testing.T) { cfg: checkNodeDeletedConfig{}, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeDeleted: &castai.ActionCheckNodeDeleted{NodeName: "node1"}, + ActionCheckNodeDeleted: &types.ActionCheckNodeDeleted{NodeName: "node1"}, } err := h.Handle(context.Background(), action) r.EqualError(err, "node is not deleted") }) - t.Run("handle check successfully when node is not found", func(t *testing.T) { + t.Run("handle check successfully when node is not found", func(_ *testing.T) { clientset := fake.NewSimpleClientset() h := checkNodeDeletedHandler{ @@ -53,9 +53,9 @@ func TestCheckNodeDeletedHandler(t *testing.T) { cfg: checkNodeDeletedConfig{}, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeDeleted: &castai.ActionCheckNodeDeleted{NodeName: "node1"}, + ActionCheckNodeDeleted: &types.ActionCheckNodeDeleted{NodeName: "node1"}, } err := h.Handle(context.Background(), action) diff --git a/actions/check_node_status.go b/actions/check_node_status.go index 3f24e731..7e0e9cd8 100644 --- a/actions/check_node_status.go +++ b/actions/check_node_status.go @@ -14,11 +14,11 @@ import ( "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) -func newCheckNodeStatusHandler(log logrus.FieldLogger, clientset kubernetes.Interface) ActionHandler { +func newCheckNodeStatusHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &checkNodeStatusHandler{ log: log, clientset: clientset, @@ -30,8 +30,8 @@ type checkNodeStatusHandler struct { clientset kubernetes.Interface } -func (h *checkNodeStatusHandler) Handle(ctx context.Context, action 
*castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionCheckNodeStatus) +func (h *checkNodeStatusHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionCheckNodeStatus) if !ok { return fmt.Errorf("unexpected type %T for check node status handler", action.Data()) } @@ -40,15 +40,15 @@ func (h *checkNodeStatusHandler) Handle(ctx context.Context, action *castai.Clus "node_name": req.NodeName, "node_id": req.NodeID, "node_status": req.NodeStatus, - "type": reflect.TypeOf(action.Data().(*castai.ActionCheckNodeStatus)).String(), + "type": reflect.TypeOf(action.Data().(*types.ActionCheckNodeStatus)).String(), actionIDLogField: action.ID, }) switch req.NodeStatus { - case castai.ActionCheckNodeStatus_READY: + case types.ActionCheckNodeStatus_READY: log.Info("checking node ready") - return h.checkNodeReady(ctx, log, req) - case castai.ActionCheckNodeStatus_DELETED: + return h.checkNodeReady(ctx, req) + case types.ActionCheckNodeStatus_DELETED: log.Info("checking node deleted") return h.checkNodeDeleted(ctx, log, req) @@ -57,7 +57,7 @@ func (h *checkNodeStatusHandler) Handle(ctx context.Context, action *castai.Clus return fmt.Errorf("unknown status to check provided node=%s status=%s", req.NodeName, req.NodeStatus) } -func (h *checkNodeStatusHandler) checkNodeDeleted(ctx context.Context, log *logrus.Entry, req *castai.ActionCheckNodeStatus) error { +func (h *checkNodeStatusHandler) checkNodeDeleted(ctx context.Context, log *logrus.Entry, req *types.ActionCheckNodeStatus) error { timeout := 10 if req.WaitTimeoutSeconds != nil { timeout = int(*req.WaitTimeoutSeconds) @@ -85,7 +85,7 @@ func (h *checkNodeStatusHandler) checkNodeDeleted(ctx context.Context, log *logr return false, nil } - currentNodeID, ok := n.Labels[castai.LabelNodeID] + currentNodeID, ok := n.Labels[labelNodeID] if !ok { log.Info("node doesn't have castai node id label") } @@ -111,7 +111,7 @@ func (h *checkNodeStatusHandler) 
checkNodeDeleted(ctx context.Context, log *logr ) } -func (h *checkNodeStatusHandler) checkNodeReady(ctx context.Context, log *logrus.Entry, req *castai.ActionCheckNodeStatus) error { +func (h *checkNodeStatusHandler) checkNodeReady(ctx context.Context, req *types.ActionCheckNodeStatus) error { timeout := 9 * time.Minute watchObject := metav1.SingleObject(metav1.ObjectMeta{Name: req.NodeName}) if req.WaitTimeoutSeconds != nil { @@ -140,7 +140,7 @@ func (h *checkNodeStatusHandler) checkNodeReady(ctx context.Context, log *logrus func isNodeReady(node *corev1.Node, castNodeID string) bool { // if node has castai node id label, check if it matches the one we are waiting for // if it doesn't match, we can skip this node - if val, ok := node.Labels[castai.LabelNodeID]; ok { + if val, ok := node.Labels[labelNodeID]; ok { if val != "" && val != castNodeID { return false } diff --git a/actions/check_node_status_test.go b/actions/check_node_status_test.go index dc96b187..7ef7e533 100644 --- a/actions/check_node_status_test.go +++ b/actions/check_node_status_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/google/uuid" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" @@ -16,7 +15,7 @@ import ( "k8s.io/client-go/kubernetes/fake" k8stest "k8s.io/client-go/testing" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestCheckStatus_Deleted(t *testing.T) { @@ -30,7 +29,7 @@ func TestCheckStatus_Deleted(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - castai.LabelNodeID: "old-node-id", + labelNodeID: "old-node-id", }, }, } @@ -41,11 +40,11 @@ func TestCheckStatus_Deleted(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: 
castai.ActionCheckNodeStatus_DELETED, + NodeStatus: types.ActionCheckNodeStatus_DELETED, NodeID: "old-node-id", }, } @@ -69,11 +68,11 @@ func TestCheckStatus_Deleted(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_DELETED, + NodeStatus: types.ActionCheckNodeStatus_DELETED, NodeID: "old-node-id", }, } @@ -91,11 +90,11 @@ func TestCheckStatus_Deleted(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_DELETED, + NodeStatus: types.ActionCheckNodeStatus_DELETED, NodeID: "old-node-id", }, } @@ -110,7 +109,7 @@ func TestCheckStatus_Deleted(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node1", Labels: map[string]string{ - castai.LabelNodeID: "old-node-id", + labelNodeID: "old-node-id", }, }, } @@ -121,11 +120,11 @@ func TestCheckStatus_Deleted(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_DELETED, + NodeStatus: types.ActionCheckNodeStatus_DELETED, NodeID: "im-a-different-node", }, } @@ -157,11 +156,11 @@ func TestCheckStatus_Ready(t *testing.T) { }() timeout := int32(1) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: 
castai.ActionCheckNodeStatus_READY, + NodeStatus: types.ActionCheckNodeStatus_READY, WaitTimeoutSeconds: &timeout, }, } @@ -194,11 +193,11 @@ func TestCheckStatus_Ready(t *testing.T) { } timeout := int32(60) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_READY, + NodeStatus: types.ActionCheckNodeStatus_READY, WaitTimeoutSeconds: &timeout, }, } @@ -249,11 +248,11 @@ func TestCheckStatus_Ready(t *testing.T) { } timeout := int32(60) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_READY, + NodeStatus: types.ActionCheckNodeStatus_READY, WaitTimeoutSeconds: &timeout, }, } @@ -302,11 +301,11 @@ func TestCheckStatus_Ready(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: castai.ActionCheckNodeStatus_READY, + NodeStatus: types.ActionCheckNodeStatus_READY, }, } @@ -322,7 +321,7 @@ func TestCheckStatus_Ready(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - castai.LabelNodeID: "old-node-id", + labelNodeID: "old-node-id", }, }, Status: v1.NodeStatus{ @@ -342,11 +341,11 @@ func TestCheckStatus_Ready(t *testing.T) { } timeout := int32(60) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionCheckNodeStatus: &castai.ActionCheckNodeStatus{ + ActionCheckNodeStatus: &types.ActionCheckNodeStatus{ NodeName: "node1", - NodeStatus: 
castai.ActionCheckNodeStatus_READY, + NodeStatus: types.ActionCheckNodeStatus_READY, WaitTimeoutSeconds: &timeout, NodeID: "new-node-id", }, @@ -371,7 +370,7 @@ func TestCheckStatus_Ready(t *testing.T) { time.Sleep(1 * time.Second) newNode := node.DeepCopy() - newNode.Labels[castai.LabelNodeID] = "new-node-id" + newNode.Labels[labelNodeID] = "new-node-id" _, _ = clientset.CoreV1().Nodes().Create(context.Background(), newNode, metav1.CreateOptions{}) diff --git a/actions/create_event_handler.go b/actions/create_event_handler.go index cc5643dd..da21c846 100644 --- a/actions/create_event_handler.go +++ b/actions/create_event_handler.go @@ -10,14 +10,14 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) -func newCreateEventHandler(log logrus.FieldLogger, clientset kubernetes.Interface) ActionHandler { +func newCreateEventHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &createEventHandler{ log: log, clientset: clientset, @@ -29,8 +29,8 @@ type createEventHandler struct { clientset kubernetes.Interface } -func (h *createEventHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionCreateEvent) +func (h *createEventHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionCreateEvent) if !ok { return fmt.Errorf("unexpected type %T for create event handler", action.Data()) } @@ -86,7 +86,7 @@ func (h *createEventHandler) Handle(ctx context.Context, action *castai.ClusterA _, err = h.clientset.CoreV1(). Events(event.Namespace). 
- Patch(ctx, similarEvent.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + Patch(ctx, similarEvent.Name, k8stypes.StrategicMergePatchType, patch, metav1.PatchOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("patching event for ref %v: %w", req.ObjectRef, err) } diff --git a/actions/create_event_handler_test.go b/actions/create_event_handler_test.go index 50455acd..5eb41467 100644 --- a/actions/create_event_handler_test.go +++ b/actions/create_event_handler_test.go @@ -12,28 +12,28 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestCreateEvent(t *testing.T) { r := require.New(t) - id := types.UID(uuid.New().String()) + id := k8stypes.UID(uuid.New().String()) tests := []struct { name string - action *castai.ClusterAction + action *types.ClusterAction actionCount int object runtime.Object expectedEvent *corev1.Event }{ { name: "create single pod event", - action: &castai.ClusterAction{ + action: &types.ClusterAction{ ID: uuid.New().String(), - ActionCreateEvent: &castai.ActionCreateEvent{ + ActionCreateEvent: &types.ActionCreateEvent{ Reporter: "autoscaler.cast.ai", ObjectRef: podObjReference(testPod(id)), EventTime: time.Now(), @@ -57,9 +57,9 @@ func TestCreateEvent(t *testing.T) { }, { name: "create several pod events", - action: &castai.ClusterAction{ + action: &types.ClusterAction{ ID: "", - ActionCreateEvent: &castai.ActionCreateEvent{ + ActionCreateEvent: &types.ActionCreateEvent{ Reporter: "provisioning.cast.ai", ObjectRef: podObjReference(testPod(id)), EventTime: time.Now(), @@ -83,7 +83,7 @@ func TestCreateEvent(t *testing.T) { }, } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(_ 
*testing.T) { clientSet := fake.NewSimpleClientset(test.object) h := createEventHandler{ log: logrus.New(), @@ -115,7 +115,7 @@ func TestCreateEvent(t *testing.T) { } } -func testPod(id types.UID) *corev1.Pod { +func testPod(id k8stypes.UID) *corev1.Pod { return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", diff --git a/actions/create_handler.go b/actions/create_handler.go index 4f757911..2c2e031e 100644 --- a/actions/create_handler.go +++ b/actions/create_handler.go @@ -12,10 +12,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) type createHandler struct { @@ -23,15 +23,15 @@ type createHandler struct { client dynamic.Interface } -func newCreateHandler(log logrus.FieldLogger, client dynamic.Interface) ActionHandler { +func newCreateHandler(log logrus.FieldLogger, client dynamic.Interface) actionHandler { return &createHandler{ log: log, client: client, } } -func (h *createHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionCreate) +func (h *createHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionCreate) if !ok { return newUnexpectedTypeErr(action.Data(), req) } @@ -104,7 +104,7 @@ func (h *createHandler) Handle(ctx context.Context, action *castai.ClusterAction } log.Infof("patching resource: %s", patch) - _, err = r.Patch(ctx, obj.GetName(), types.MergePatchType, patch, metav1.PatchOptions{}) + _, err = r.Patch(ctx, obj.GetName(), k8stypes.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { return fmt.Errorf("patching resource %v: %w", obj.GetName(), err) } diff --git a/actions/create_handler_test.go 
b/actions/create_handler_test.go index d930bb4b..5a8fc35a 100644 --- a/actions/create_handler_test.go +++ b/actions/create_handler_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/client-go/dynamic/fake" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func Test_newCreateHandler(t *testing.T) { @@ -25,29 +25,29 @@ func Test_newCreateHandler(t *testing.T) { tests := map[string]struct { objs []runtime.Object - action *castai.ClusterAction + action *types.ClusterAction convertFn func(i map[string]interface{}) client.Object err error want *appsv1.Deployment }{ "should return error when action is of a different type": { - action: &castai.ClusterAction{ - ActionDeleteNode: &castai.ActionDeleteNode{}, + action: &types.ClusterAction{ + ActionDeleteNode: &types.ActionDeleteNode{}, }, - err: newUnexpectedTypeErr(&castai.ActionDeleteNode{}, &castai.ActionCreate{}), + err: newUnexpectedTypeErr(&types.ActionDeleteNode{}, &types.ActionCreate{}), }, "should return error when object is not provided": { - action: &castai.ClusterAction{ - ActionCreate: &castai.ActionCreate{ - GroupVersionResource: castai.GroupVersionResource{}, + action: &types.ClusterAction{ + ActionCreate: &types.ActionCreate{ + GroupVersionResource: types.GroupVersionResource{}, }, }, err: errors.New("no object provided"), }, "should create new deployment": { - action: &castai.ClusterAction{ - ActionCreate: &castai.ActionCreate{ - GroupVersionResource: castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionCreate: &types.ActionCreate{ + GroupVersionResource: types.GroupVersionResource{ Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Resource: "deployments", @@ -63,9 +63,9 @@ func Test_newCreateHandler(t *testing.T) { }, }, "should patch already existing resource": { - action: &castai.ClusterAction{ - ActionCreate: &castai.ActionCreate{ - GroupVersionResource: 
castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionCreate: &types.ActionCreate{ + GroupVersionResource: types.GroupVersionResource{ Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Resource: "deployments", @@ -75,7 +75,7 @@ func Test_newCreateHandler(t *testing.T) { })), }, }, - objs: []runtime.Object{newDeployment(func(d *appsv1.Deployment) {})}, + objs: []runtime.Object{newDeployment(func(_ *appsv1.Deployment) {})}, want: newDeployment(func(d *appsv1.Deployment) { d.Labels = map[string]string{"changed": "true"} }), diff --git a/actions/delete_handler.go b/actions/delete_handler.go index 8394a34d..23d24b6e 100644 --- a/actions/delete_handler.go +++ b/actions/delete_handler.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) type deleteHandler struct { @@ -19,15 +19,15 @@ type deleteHandler struct { client dynamic.Interface } -func newDeleteHandler(log logrus.FieldLogger, client dynamic.Interface) ActionHandler { +func newDeleteHandler(log logrus.FieldLogger, client dynamic.Interface) actionHandler { return &deleteHandler{ log: log, client: client, } } -func (h *deleteHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionDelete) +func (h *deleteHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionDelete) if !ok { return newUnexpectedTypeErr(action.Data(), req) } diff --git a/actions/delete_handler_test.go b/actions/delete_handler_test.go index 0c019970..4bd57c12 100644 --- a/actions/delete_handler_test.go +++ b/actions/delete_handler_test.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func 
Test_newDeleteHandler(t *testing.T) { @@ -25,21 +25,21 @@ func Test_newDeleteHandler(t *testing.T) { tests := map[string]struct { objs []runtime.Object - action *castai.ClusterAction + action *types.ClusterAction want int err error }{ "should return error when action is of a different type": { - action: &castai.ClusterAction{ - ActionDeleteNode: &castai.ActionDeleteNode{}, + action: &types.ClusterAction{ + ActionDeleteNode: &types.ActionDeleteNode{}, }, - err: newUnexpectedTypeErr(&castai.ActionDeleteNode{}, &castai.ActionDelete{}), + err: newUnexpectedTypeErr(&types.ActionDeleteNode{}, &types.ActionDelete{}), }, "should skip if resource not found": { - action: &castai.ClusterAction{ - ActionDelete: &castai.ActionDelete{ - ID: castai.ObjectID{ - GroupVersionResource: castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionDelete: &types.ActionDelete{ + ID: types.ObjectID{ + GroupVersionResource: types.GroupVersionResource{ Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Resource: "deployments", @@ -55,10 +55,10 @@ func Test_newDeleteHandler(t *testing.T) { want: 1, }, "should delete deployment": { - action: &castai.ClusterAction{ - ActionDelete: &castai.ActionDelete{ - ID: castai.ObjectID{ - GroupVersionResource: castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionDelete: &types.ActionDelete{ + ID: types.ObjectID{ + GroupVersionResource: types.GroupVersionResource{ Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Resource: "deployments", @@ -76,10 +76,10 @@ func Test_newDeleteHandler(t *testing.T) { want: 2, }, "should delete resource without namespace": { - action: &castai.ClusterAction{ - ActionDelete: &castai.ActionDelete{ - ID: castai.ObjectID{ - GroupVersionResource: castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionDelete: &types.ActionDelete{ + ID: types.ObjectID{ + GroupVersionResource: types.GroupVersionResource{ Group: 
corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Resource: "nodes", diff --git a/actions/delete_node_handler.go b/actions/delete_node_handler.go index 7b299775..0c2e24e9 100644 --- a/actions/delete_node_handler.go +++ b/actions/delete_node_handler.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -26,7 +26,7 @@ type deleteNodeConfig struct { var errNodeMismatch = errors.New("node id mismatch") -func newDeleteNodeHandler(log logrus.FieldLogger, clientset kubernetes.Interface) ActionHandler { +func newDeleteNodeHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &deleteNodeHandler{ log: log, clientset: clientset, @@ -49,8 +49,8 @@ type deleteNodeHandler struct { cfg deleteNodeConfig } -func (h *deleteNodeHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionDeleteNode) +func (h *deleteNodeHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionDeleteNode) if !ok { return fmt.Errorf("unexpected type %T for delete node handler", action.Data()) } @@ -58,7 +58,7 @@ func (h *deleteNodeHandler) Handle(ctx context.Context, action *castai.ClusterAc log := h.log.WithFields(logrus.Fields{ "node_name": req.NodeName, "node_id": req.NodeID, - "type": reflect.TypeOf(action.Data().(*castai.ActionDeleteNode)).String(), + "type": reflect.TypeOf(action.Data().(*types.ActionDeleteNode)).String(), actionIDLogField: action.ID, }) log.Info("deleting kubernetes node") @@ -78,7 +78,7 @@ func (h *deleteNodeHandler) Handle(ctx context.Context, action *castai.ClusterAc return true, fmt.Errorf("error getting node: %w", err) } - if val, ok := current.Labels[castai.LabelNodeID]; ok { + if val, ok := current.Labels[labelNodeID]; 
ok { if val != "" && val != req.NodeID { log.Infof("node id mismatch, expected %q got %q. Skipping delete.", req.NodeID, val) return true, errNodeMismatch diff --git a/actions/delete_node_handler_test.go b/actions/delete_node_handler_test.go index 8dfce49a..931c6c32 100644 --- a/actions/delete_node_handler_test.go +++ b/actions/delete_node_handler_test.go @@ -5,16 +5,15 @@ import ( "testing" "github.com/google/uuid" - "k8s.io/apimachinery/pkg/fields" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestDeleteNodeHandler(t *testing.T) { @@ -31,9 +30,9 @@ func TestDeleteNodeHandler(t *testing.T) { } clientset := fake.NewSimpleClientset(node) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDeleteNode: &castai.ActionDeleteNode{ + ActionDeleteNode: &types.ActionDeleteNode{ NodeName: "node1", }, } @@ -62,9 +61,9 @@ func TestDeleteNodeHandler(t *testing.T) { } clientset := fake.NewSimpleClientset(node) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDeleteNode: &castai.ActionDeleteNode{ + ActionDeleteNode: &types.ActionDeleteNode{ NodeName: "already-deleted-node", }, } @@ -89,15 +88,15 @@ func TestDeleteNodeHandler(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - castai.LabelNodeID: "node-id", + labelNodeID: "node-id", }, }, } clientset := fake.NewSimpleClientset(node) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDeleteNode: &castai.ActionDeleteNode{ + ActionDeleteNode: &types.ActionDeleteNode{ NodeName: "node1", NodeID: "another-node-id", }, @@ -114,7 +113,7 @@ 
func TestDeleteNodeHandler(t *testing.T) { existing, err := clientset.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) r.NoError(err) - existing.Labels[castai.LabelNodeID] = "node-id" + existing.Labels[labelNodeID] = "node-id" }) t.Run("delete node with pods", func(t *testing.T) { @@ -123,9 +122,9 @@ func TestDeleteNodeHandler(t *testing.T) { podName := "pod1" clientset := setupFakeClientWithNodePodEviction(nodeName, podName) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDeleteNode: &castai.ActionDeleteNode{ + ActionDeleteNode: &types.ActionDeleteNode{ NodeName: nodeName, }, } diff --git a/actions/disconnect_cluster_handler.go b/actions/disconnect_cluster_handler.go index 1acbad70..64cc4d7d 100644 --- a/actions/disconnect_cluster_handler.go +++ b/actions/disconnect_cluster_handler.go @@ -3,16 +3,17 @@ package actions import ( "context" "fmt" - "github.com/castai/cluster-controller/castai" "reflect" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + + "github.com/castai/cluster-controller/types" ) -func newDisconnectClusterHandler(log logrus.FieldLogger, client kubernetes.Interface) ActionHandler { +func newDisconnectClusterHandler(log logrus.FieldLogger, client kubernetes.Interface) actionHandler { return &disconnectClusterHandler{ log: log, client: client, @@ -24,7 +25,7 @@ type disconnectClusterHandler struct { client kubernetes.Interface } -func (c *disconnectClusterHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { +func (c *disconnectClusterHandler) Handle(ctx context.Context, action *types.ClusterAction) error { ns := "castai-agent" _, err := c.client.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) if err != nil { @@ -40,7 +41,7 @@ func (c *disconnectClusterHandler) Handle(ctx context.Context, action *castai.Cl return err } log := 
c.log.WithFields(logrus.Fields{ - "type": reflect.TypeOf(action.Data().(*castai.ActionDisconnectCluster)).String(), + "type": reflect.TypeOf(action.Data().(*types.ActionDisconnectCluster)).String(), actionIDLogField: action.ID, }) diff --git a/actions/disconnect_cluster_handler_test.go b/actions/disconnect_cluster_handler_test.go index 5bf06e1d..fbc136b7 100644 --- a/actions/disconnect_cluster_handler_test.go +++ b/actions/disconnect_cluster_handler_test.go @@ -2,9 +2,9 @@ package actions import ( "context" - "github.com/google/uuid" "testing" + "github.com/google/uuid" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestDisconnectClusterHandler(t *testing.T) { @@ -27,9 +27,9 @@ func TestDisconnectClusterHandler(t *testing.T) { } clientset := fake.NewSimpleClientset(node) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDisconnectCluster: &castai.ActionDisconnectCluster{}, + ActionDisconnectCluster: &types.ActionDisconnectCluster{}, } handler := newDisconnectClusterHandler(logrus.New(), clientset) diff --git a/actions/drain_node_handler.go b/actions/drain_node_handler.go index 17dc7820..c9f30686 100644 --- a/actions/drain_node_handler.go +++ b/actions/drain_node_handler.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/kubectl/pkg/drain" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -40,7 +40,7 @@ type drainNodeConfig struct { skipDeletedTimeoutSeconds int } -func newDrainNodeHandler(log logrus.FieldLogger, clientset kubernetes.Interface, castNamespace string) ActionHandler { +func newDrainNodeHandler(log logrus.FieldLogger, clientset 
kubernetes.Interface, castNamespace string) actionHandler { return &drainNodeHandler{ log: log, clientset: clientset, @@ -58,7 +58,7 @@ func newDrainNodeHandler(log logrus.FieldLogger, clientset kubernetes.Interface, // getDrainTimeout returns drain timeout adjusted to action creation time. // the result is clamped between 0s and the requested timeout. -func (h *drainNodeHandler) getDrainTimeout(action *castai.ClusterAction) time.Duration { +func (h *drainNodeHandler) getDrainTimeout(action *types.ClusterAction) time.Duration { timeSinceCreated := time.Since(action.CreatedAt) drainTimeout := time.Duration(action.ActionDrainNode.DrainTimeoutSeconds) * time.Second @@ -74,8 +74,8 @@ type drainNodeHandler struct { cfg drainNodeConfig } -func (h *drainNodeHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionDrainNode) +func (h *drainNodeHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionDrainNode) if !ok { return fmt.Errorf("unexpected type %T for drain handler", action.Data()) } @@ -83,7 +83,7 @@ func (h *drainNodeHandler) Handle(ctx context.Context, action *castai.ClusterAct log := h.log.WithFields(logrus.Fields{ "node_name": req.NodeName, "node_id": req.NodeID, - "action": reflect.TypeOf(action.Data().(*castai.ActionDrainNode)).String(), + "action": reflect.TypeOf(action.Data().(*types.ActionDrainNode)).String(), actionIDLogField: action.ID, }) diff --git a/actions/drain_node_handler_test.go b/actions/drain_node_handler_test.go index 12fce87d..f8ba4256 100644 --- a/actions/drain_node_handler_test.go +++ b/actions/drain_node_handler_test.go @@ -18,7 +18,7 @@ import ( "k8s.io/client-go/kubernetes/fake" ktest "k8s.io/client-go/testing" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestDrainNodeHandler(t *testing.T) { @@ -33,9 +33,9 @@ func TestDrainNodeHandler(t *testing.T) { clientset := 
setupFakeClientWithNodePodEviction(nodeName, podName) prependEvictionReaction(t, clientset, true) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 1, Force: true, @@ -74,9 +74,9 @@ func TestDrainNodeHandler(t *testing.T) { clientset := setupFakeClientWithNodePodEviction(nodeName, podName) prependEvictionReaction(t, clientset, true) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "already-deleted-node", DrainTimeoutSeconds: 1, Force: true, @@ -103,9 +103,9 @@ func TestDrainNodeHandler(t *testing.T) { clientset := setupFakeClientWithNodePodEviction(nodeName, podName) prependEvictionReaction(t, clientset, false) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 1, Force: true, @@ -136,9 +136,9 @@ func TestDrainNodeHandler(t *testing.T) { podName := "pod1" clientset := setupFakeClientWithNodePodEviction(nodeName, podName) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 1, Force: true, @@ -185,9 +185,9 @@ func TestDrainNodeHandler(t *testing.T) { podName := "pod1" clientset := setupFakeClientWithNodePodEviction(nodeName, podName) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 1, Force: true, @@ -227,9 +227,9 @@ func TestDrainNodeHandler(t *testing.T) { 
podName := "pod1" clientset := setupFakeClientWithNodePodEviction(nodeName, podName) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 1, Force: true, @@ -273,9 +273,9 @@ func TestGetDrainTimeout(t *testing.T) { t.Run("drain timeout for new action should be the same like in request", func(t *testing.T) { r := require.New(t) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 100, Force: true, @@ -293,9 +293,9 @@ func TestGetDrainTimeout(t *testing.T) { t.Run("drain timeout for older action should be decreased by time since action creation", func(t *testing.T) { r := require.New(t) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 600, Force: true, @@ -313,9 +313,9 @@ func TestGetDrainTimeout(t *testing.T) { t.Run("drain timeout min wait timeout should be 0s", func(t *testing.T) { r := require.New(t) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionDrainNode: &castai.ActionDrainNode{ + ActionDrainNode: &types.ActionDrainNode{ NodeName: "node1", DrainTimeoutSeconds: 600, Force: true, diff --git a/actions/mock/client.go b/actions/mock/client.go new file mode 100644 index 00000000..8c72ea18 --- /dev/null +++ b/actions/mock/client.go @@ -0,0 +1,79 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/castai/cluster-controller/actions (interfaces: Client) + +// Package mock_actions is a generated GoMock package. 
+package mock_actions + +import ( + context "context" + reflect "reflect" + + types "github.com/castai/cluster-controller/types" + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// AckAction mocks base method. +func (m *MockClient) AckAction(arg0 context.Context, arg1 string, arg2 *string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AckAction", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AckAction indicates an expected call of AckAction. +func (mr *MockClientMockRecorder) AckAction(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AckAction", reflect.TypeOf((*MockClient)(nil).AckAction), arg0, arg1, arg2) +} + +// GetActions mocks base method. +func (m *MockClient) GetActions(arg0 context.Context, arg1 string) ([]*types.ClusterAction, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActions", arg0, arg1) + ret0, _ := ret[0].([]*types.ClusterAction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActions indicates an expected call of GetActions. 
+func (mr *MockClientMockRecorder) GetActions(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActions", reflect.TypeOf((*MockClient)(nil).GetActions), arg0, arg1) +} + +// SendAKSInitData mocks base method. +func (m *MockClient) SendAKSInitData(arg0 context.Context, arg1, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAKSInitData", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAKSInitData indicates an expected call of SendAKSInitData. +func (mr *MockClientMockRecorder) SendAKSInitData(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAKSInitData", reflect.TypeOf((*MockClient)(nil).SendAKSInitData), arg0, arg1, arg2, arg3) +} diff --git a/actions/patch_handler.go b/actions/patch_handler.go index d3e5e34b..9a9b969b 100644 --- a/actions/patch_handler.go +++ b/actions/patch_handler.go @@ -13,7 +13,7 @@ import ( apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) type patchHandler struct { @@ -21,15 +21,15 @@ type patchHandler struct { client dynamic.Interface } -func newPatchHandler(log logrus.FieldLogger, client dynamic.Interface) ActionHandler { +func newPatchHandler(log logrus.FieldLogger, client dynamic.Interface) actionHandler { return &patchHandler{ log: log, client: client, } } -func (h *patchHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionPatch) +func (h *patchHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionPatch) if !ok { return newUnexpectedTypeErr(action.Data(), req) } diff --git a/actions/patch_handler_test.go b/actions/patch_handler_test.go index 341e73a8..02f0f527 100644 --- 
a/actions/patch_handler_test.go +++ b/actions/patch_handler_test.go @@ -15,24 +15,24 @@ import ( "k8s.io/client-go/dynamic/fake" client_testing "k8s.io/client-go/testing" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestPatchHandler(t *testing.T) { tests := map[string]struct { objs []runtime.Object - action *castai.ClusterAction + action *types.ClusterAction err error }{ "should return an error when the action is nil": { - action: &castai.ClusterAction{}, - err: newUnexpectedTypeErr(nil, &castai.ActionPatch{}), + action: &types.ClusterAction{}, + err: newUnexpectedTypeErr(nil, &types.ActionPatch{}), }, "should return an error when the action is of a different type": { - action: &castai.ClusterAction{ - ActionDeleteNode: &castai.ActionDeleteNode{}, + action: &types.ClusterAction{ + ActionDeleteNode: &types.ActionDeleteNode{}, }, - err: newUnexpectedTypeErr(&castai.ActionDeleteNode{}, &castai.ActionPatch{}), + err: newUnexpectedTypeErr(&types.ActionDeleteNode{}, &types.ActionPatch{}), }, "should forward patch to the api in the request": { objs: []runtime.Object{ @@ -50,10 +50,10 @@ func TestPatchHandler(t *testing.T) { }, }, }, - action: &castai.ClusterAction{ - ActionPatch: &castai.ActionPatch{ - ID: castai.ObjectID{ - GroupVersionResource: castai.GroupVersionResource{ + action: &types.ClusterAction{ + ActionPatch: &types.ActionPatch{ + ID: types.ObjectID{ + GroupVersionResource: types.GroupVersionResource{ Group: "apps", Version: "v1", Resource: "deployments", diff --git a/actions/patch_node_handler.go b/actions/patch_node_handler.go index 9ef7c4ca..4a837d2a 100644 --- a/actions/patch_node_handler.go +++ b/actions/patch_node_handler.go @@ -13,10 +13,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/kubernetes" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) -func newPatchNodeHandler(log logrus.FieldLogger, clientset 
kubernetes.Interface) ActionHandler { +func newPatchNodeHandler(log logrus.FieldLogger, clientset kubernetes.Interface) actionHandler { return &patchNodeHandler{ log: log, clientset: clientset, @@ -28,8 +28,8 @@ type patchNodeHandler struct { clientset kubernetes.Interface } -func (h *patchNodeHandler) Handle(ctx context.Context, action *castai.ClusterAction) error { - req, ok := action.Data().(*castai.ActionPatchNode) +func (h *patchNodeHandler) Handle(ctx context.Context, action *types.ClusterAction) error { + req, ok := action.Data().(*types.ActionPatchNode) if !ok { return fmt.Errorf("unexpected type %T for delete patch handler", action.Data()) } @@ -52,7 +52,7 @@ func (h *patchNodeHandler) Handle(ctx context.Context, action *castai.ClusterAct log := h.log.WithFields(logrus.Fields{ "node_name": req.NodeName, "node_id": req.NodeID, - "action": reflect.TypeOf(action.Data().(*castai.ActionPatchNode)).String(), + "action": reflect.TypeOf(action.Data().(*types.ActionPatchNode)).String(), actionIDLogField: action.ID, }) @@ -121,7 +121,7 @@ func patchNodeMapField(values map[string]string, patch map[string]string) map[st return values } -func patchTaints(taints []v1.Taint, patch []castai.NodeTaint) []v1.Taint { +func patchTaints(taints []v1.Taint, patch []types.NodeTaint) []v1.Taint { for _, v := range patch { taint := &v1.Taint{Key: v.Key, Value: v.Value, Effect: v1.TaintEffect(v.Effect)} if v.Key[0] == '-' { diff --git a/actions/patch_node_handler_test.go b/actions/patch_node_handler_test.go index 59b9cfd8..357391b5 100644 --- a/actions/patch_node_handler_test.go +++ b/actions/patch_node_handler_test.go @@ -13,7 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestPatchNodeHandler(t *testing.T) { @@ -22,7 +22,7 @@ func TestPatchNodeHandler(t *testing.T) { log := logrus.New() log.SetLevel(logrus.DebugLevel) - 
t.Run("patch successfully", func(t *testing.T) { + t.Run("patch successfully", func(_ *testing.T) { nodeName := "node1" node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -56,9 +56,9 @@ func TestPatchNodeHandler(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "node1", Labels: map[string]string{ "-l1": "", @@ -68,7 +68,7 @@ func TestPatchNodeHandler(t *testing.T) { "-a1": "", "a2": "", }, - Taints: []castai.NodeTaint{ + Taints: []types.NodeTaint{ { Key: "t3", Value: "t3", @@ -111,7 +111,7 @@ func TestPatchNodeHandler(t *testing.T) { r.Equal(action.ActionPatchNode.Capacity["foo"], n.Status.Capacity["foo"]) }) - t.Run("skip patch when node not found", func(t *testing.T) { + t.Run("skip patch when node not found", func(_ *testing.T) { nodeName := "node1" node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -120,9 +120,9 @@ func TestPatchNodeHandler(t *testing.T) { } clientset := fake.NewSimpleClientset(node) - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "already-deleted-node", }, } @@ -138,7 +138,7 @@ func TestPatchNodeHandler(t *testing.T) { r.NoError(err) }) - t.Run("cordoning node", func(t *testing.T) { + t.Run("cordoning node", func(_ *testing.T) { nodeName := "node1" node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -155,9 +155,9 @@ func TestPatchNodeHandler(t *testing.T) { clientset: clientset, } - action := &castai.ClusterAction{ + action := &types.ClusterAction{ ID: uuid.New().String(), - ActionPatchNode: &castai.ActionPatchNode{ + ActionPatchNode: &types.ActionPatchNode{ NodeName: "node1", Unschedulable: lo.ToPtr(true), }, diff --git a/actions/send_aks_init_data_handler.go b/actions/send_aks_init_data_handler.go index 8e2bd83e..805715a5 
100644 --- a/actions/send_aks_init_data_handler.go +++ b/actions/send_aks_init_data_handler.go @@ -18,10 +18,10 @@ import ( "github.com/sirupsen/logrus" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) -func newSendAKSInitDataHandler(log logrus.FieldLogger, client castai.ActionsClient) ActionHandler { +func newSendAKSInitDataHandler(log logrus.FieldLogger, client Client) actionHandler { return &sendAKSInitDataHandler{ log: log, client: client, @@ -33,13 +33,13 @@ func newSendAKSInitDataHandler(log logrus.FieldLogger, client castai.ActionsClie type sendAKSInitDataHandler struct { log logrus.FieldLogger - client castai.ActionsClient + client Client baseDir string cloudConfigPath string } -func (s *sendAKSInitDataHandler) Handle(ctx context.Context, _ *castai.ClusterAction) error { +func (s *sendAKSInitDataHandler) Handle(ctx context.Context, _ *types.ClusterAction) error { cloudConfig, err := s.readCloudConfigBase64(s.cloudConfigPath) if err != nil { return fmt.Errorf("reading cloud config: %w", err) @@ -56,10 +56,7 @@ func (s *sendAKSInitDataHandler) Handle(ctx context.Context, _ *castai.ClusterAc if err != nil { return fmt.Errorf("protected settings decrypt failed: %w", err) } - return s.client.SendAKSInitData(ctx, &castai.AKSInitDataRequest{ - CloudConfigBase64: string(cloudConfig), - ProtectedSettingsBase64: base64.StdEncoding.EncodeToString(protectedSettings), - }) + return s.client.SendAKSInitData(ctx, string(cloudConfig), base64.StdEncoding.EncodeToString(protectedSettings), "") } // readCloudConfigBase64 extracts base64 encoded cloud config content from XML file. @@ -82,7 +79,7 @@ func (s *sendAKSInitDataHandler) readCloudConfigBase64(cloudConfigPath string) ( // findSettingsPath searches for custom script settings file path which contains encrypted init data env variables. 
func (s *sendAKSInitDataHandler) findSettingsPath(baseDir string) (string, error) { var res string - err := filepath.WalkDir(baseDir, func(path string, d fs.DirEntry, err error) error { + err := filepath.WalkDir(baseDir, func(path string, _ fs.DirEntry, err error) error { if strings.Contains(path, "Microsoft.Azure.Extensions.CustomScript-") && strings.HasSuffix(path, "settings") { res = path return io.EOF diff --git a/actions/send_aks_init_data_handler_test.go b/actions/send_aks_init_data_handler_test.go index d2512474..4d49a5ae 100644 --- a/actions/send_aks_init_data_handler_test.go +++ b/actions/send_aks_init_data_handler_test.go @@ -2,37 +2,36 @@ package actions import ( "context" - "github.com/google/uuid" "testing" + "github.com/golang/mock/gomock" + "github.com/google/uuid" "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" - "github.com/castai/cluster-controller/castai" - "github.com/castai/cluster-controller/castai/mock" + mock_actions "github.com/castai/cluster-controller/actions/mock" + "github.com/castai/cluster-controller/types" ) func TestAKSInitDataHandler(t *testing.T) { - r := require.New(t) log := logrus.New() log.SetLevel(logrus.DebugLevel) - client := mock.NewMockAPIClient(nil) + mockCtrl := gomock.NewController(t) + client := mock_actions.NewMockClient(mockCtrl) + client.EXPECT().SendAKSInitData(gomock.Any(), gomock.Not(gomock.Len(0)), gomock.Not(gomock.Len(0)), gomock.Any()).Return(nil) + h := sendAKSInitDataHandler{ log: log, client: client, cloudConfigPath: "../testdata/aks/ovf-env.xml", baseDir: "../testdata/aks", } - - action := &castai.ClusterAction{ + ctx := context.Background() + err := h.Handle(ctx, &types.ClusterAction{ ID: uuid.New().String(), - ActionSendAKSInitData: &castai.ActionSendAKSInitData{}, + ActionSendAKSInitData: &types.ActionSendAKSInitData{}, + }) + if err != nil { + t.Fatal(err) } - ctx := context.Background() - err := h.Handle(ctx, action) - - r.NoError(err) - 
r.NotEmpty(client.AKSInitDataReq.CloudConfigBase64) - r.NotEmpty(client.AKSInitDataReq.ProtectedSettingsBase64) } diff --git a/castai/client.go b/castai/client.go index 4619b36e..0566c203 100644 --- a/castai/client.go +++ b/castai/client.go @@ -12,6 +12,7 @@ import ( "golang.org/x/net/http2" "github.com/castai/cluster-controller/config" + "github.com/castai/cluster-controller/types" ) const ( @@ -20,14 +21,6 @@ const ( headerKubernetesVersion = "X-K8s-Version" ) -// ActionsClient lists functions used by actions package. -// TODO: move interface into actions package. -type ActionsClient interface { - GetActions(ctx context.Context, k8sVersion string) ([]*ClusterAction, error) - AckAction(ctx context.Context, actionID string, req *AckClusterActionRequest) error - SendAKSInitData(ctx context.Context, req *AKSInitDataRequest) error -} - // Client talks to Cast AI. It can poll and acknowledge actions // and also inject logs. type Client struct { @@ -95,7 +88,12 @@ func createHTTPTransport() (*http.Transport, error) { return t1, nil } -func (c *Client) SendAKSInitData(ctx context.Context, req *AKSInitDataRequest) error { +func (c *Client) SendAKSInitData(ctx context.Context, cloudConfigBase64, protectedSettingsBase64, architecture string) error { + req := &aksInitDataRequest{ + CloudConfigBase64: cloudConfigBase64, + ProtectedSettingsBase64: protectedSettingsBase64, + Architecture: architecture, + } resp, err := c.rest.R(). SetBody(req). SetContext(ctx). @@ -137,8 +135,8 @@ func (c *Client) SendLog(ctx context.Context, e *logEntry) error { return nil } -func (c *Client) GetActions(ctx context.Context, k8sVersion string) ([]*ClusterAction, error) { - res := &GetClusterActionsResponse{} +func (c *Client) GetActions(ctx context.Context, k8sVersion string) ([]*types.ClusterAction, error) { + res := &getClusterActionsResponse{} resp, err := c.rest.R(). SetContext(ctx). SetResult(res). 
@@ -153,7 +151,10 @@ func (c *Client) GetActions(ctx context.Context, k8sVersion string) ([]*ClusterA return res.Items, nil } -func (c *Client) AckAction(ctx context.Context, actionID string, req *AckClusterActionRequest) error { +func (c *Client) AckAction(ctx context.Context, actionID string, errMessage *string) error { + req := &ackClusterActionRequest{ + Error: errMessage, + } resp, err := c.rest.R(). SetContext(ctx). SetBody(req). diff --git a/castai/mock/client.go b/castai/mock/client.go deleted file mode 100644 index 1bd56e3b..00000000 --- a/castai/mock/client.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "context" - "sync" - - "github.com/castai/cluster-controller/castai" -) - -var _ castai.ActionsClient = (*mockClient)(nil) - -func NewMockAPIClient(actions []*castai.ClusterAction) *mockClient { - return &mockClient{Actions: actions} -} - -type mockAck struct { - ActionID string - Err *string -} - -type mockClient struct { - Actions []*castai.ClusterAction - GetActionsErr error - Acks []*mockAck - AKSInitDataReq *castai.AKSInitDataRequest - - mu sync.Mutex -} - -func (m *mockClient) SendAKSInitData(ctx context.Context, req *castai.AKSInitDataRequest) error { - m.mu.Lock() - m.AKSInitDataReq = req - m.mu.Unlock() - return nil -} - -func (m *mockClient) GetActions(_ context.Context, _ string) ([]*castai.ClusterAction, error) { - m.mu.Lock() - actions := m.Actions - m.mu.Unlock() - return actions, m.GetActionsErr -} - -func (m *mockClient) AckAction(_ context.Context, actionID string, req *castai.AckClusterActionRequest) error { - m.mu.Lock() - defer m.mu.Unlock() - - m.removeAckedActions(actionID) - - m.Acks = append(m.Acks, &mockAck{ActionID: actionID, Err: req.Error}) - return nil -} - -func (m *mockClient) removeAckedActions(actionID string) { - var remaining []*castai.ClusterAction - for _, action := range m.Actions { - if action.ID != actionID { - remaining = append(remaining, action) - } - } - m.Actions = remaining -} diff --git 
a/castai/types.go b/castai/types.go index 527c62ab..e035b121 100644 --- a/castai/types.go +++ b/castai/types.go @@ -1,244 +1,16 @@ package castai -import ( - "errors" - "fmt" - "time" +import "github.com/castai/cluster-controller/types" - "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" -) - -const ( - LabelNodeID = "provisioner.cast.ai/node-id" -) - -type GetClusterActionsResponse struct { - Items []*ClusterAction `json:"items"` +type getClusterActionsResponse struct { + Items []*types.ClusterAction `json:"items"` } -type AckClusterActionRequest struct { +type ackClusterActionRequest struct { Error *string `json:"error"` } -type ClusterAction struct { - ID string `json:"id"` - ActionDeleteNode *ActionDeleteNode `json:"actionDeleteNode,omitempty"` - ActionDrainNode *ActionDrainNode `json:"actionDrainNode,omitempty"` - ActionPatchNode *ActionPatchNode `json:"actionPatchNode,omitempty"` - ActionCreateEvent *ActionCreateEvent `json:"actionCreateEvent,omitempty"` - ActionApproveCSR *ActionApproveCSR `json:"actionApproveCsr,omitempty"` - ActionChartUpsert *ActionChartUpsert `json:"actionChartUpsert,omitempty"` - ActionChartUninstall *ActionChartUninstall `json:"actionChartUninstall,omitempty"` - ActionChartRollback *ActionChartRollback `json:"actionChartRollback,omitempty"` - ActionDisconnectCluster *ActionDisconnectCluster `json:"actionDisconnectCluster,omitempty"` - ActionSendAKSInitData *ActionSendAKSInitData `json:"actionSendAksInitData,omitempty"` - ActionCheckNodeDeleted *ActionCheckNodeDeleted `json:"actionCheckNodeDeleted,omitempty"` - ActionCheckNodeStatus *ActionCheckNodeStatus `json:"actionCheckNodeStatus,omitempty"` - ActionPatch *ActionPatch `json:"actionPatch,omitempty"` - ActionCreate *ActionCreate `json:"actionCreate,omitempty"` - ActionDelete *ActionDelete `json:"actionDelete,omitempty"` - CreatedAt time.Time `json:"createdAt"` - DoneAt *time.Time `json:"doneAt,omitempty"` - Error *string `json:"error,omitempty"` -} - -func (c *ClusterAction) 
Data() interface{} { - if c.ActionDeleteNode != nil { - return c.ActionDeleteNode - } - if c.ActionDrainNode != nil { - return c.ActionDrainNode - } - if c.ActionPatchNode != nil { - return c.ActionPatchNode - } - if c.ActionCreateEvent != nil { - return c.ActionCreateEvent - } - if c.ActionApproveCSR != nil { - return c.ActionApproveCSR - } - if c.ActionChartUpsert != nil { - return c.ActionChartUpsert - } - if c.ActionChartUninstall != nil { - return c.ActionChartUninstall - } - if c.ActionChartRollback != nil { - return c.ActionChartRollback - } - if c.ActionDisconnectCluster != nil { - return c.ActionDisconnectCluster - } - if c.ActionSendAKSInitData != nil { - return c.ActionSendAKSInitData - } - if c.ActionCheckNodeDeleted != nil { - return c.ActionCheckNodeDeleted - } - if c.ActionCheckNodeStatus != nil { - return c.ActionCheckNodeStatus - } - if c.ActionPatch != nil { - return c.ActionPatch - } - if c.ActionCreate != nil { - return c.ActionCreate - } - if c.ActionDelete != nil { - return c.ActionDelete - } - return nil -} - -type LogEvent struct { - Level string `json:"level"` - Time time.Time `json:"time"` - Message string `json:"message"` - Fields logrus.Fields `json:"fields"` -} - -type GroupVersionResource struct { - Group string `json:"group"` - Version string `json:"version"` - Resource string `json:"resource"` -} - -func (r GroupVersionResource) String() string { - return fmt.Sprintf("%v/%v/%v", r.Group, r.Version, r.Resource) -} - -type ObjectID struct { - GroupVersionResource `json:",inline"` - Namespace *string `json:"namespace"` - Name string `json:"name"` -} - -type ActionPatch struct { - ID ObjectID `json:"id"` - PatchType string `json:"patchType"` - Patch string `json:"patch"` -} - -type ActionCreate struct { - GroupVersionResource `json:",inline"` - Object map[string]interface{} `json:"object,omitempty"` -} - -type ActionDelete struct { - ID ObjectID `json:"id"` -} - -type ActionDeleteNode struct { - NodeName string `json:"nodeName"` - NodeID 
string `json:"nodeId"` -} - -type ActionDrainNode struct { - NodeName string `json:"nodeName"` - NodeID string `json:"nodeId"` - DrainTimeoutSeconds int `json:"drainTimeoutSeconds"` - Force bool `json:"force"` -} - -type ActionApproveCSR struct { - NodeName string `json:"nodeName"` - NodeID string `json:"nodeId"` -} - -type ActionPatchNode struct { - NodeName string `json:"nodeName"` - NodeID string `json:"nodeId"` - Labels map[string]string `json:"labels"` - Taints []NodeTaint `json:"taints"` - Annotations map[string]string `json:"annotations"` - Unschedulable *bool `json:"unschedulable"` - // Capacity allows advertising extended resources for a Node. - // More info: https://kubernetes.io/docs/tasks/administer-cluster/extended-resource-node/ - Capacity v1.ResourceList `json:"capacity"` -} - -type NodeTaint struct { - Effect string `json:"effect"` - Key string `json:"key"` - Value string `json:"value"` -} - -type ActionCreateEvent struct { - Reporter string `json:"reportingComponent"` - ObjectRef v1.ObjectReference `json:"objectReference"` - EventTime time.Time `json:"eventTime"` - EventType string `json:"eventType"` - Reason string `json:"reason"` - Action string `json:"action"` - Message string `json:"message"` -} - -type ActionDisconnectCluster struct { -} - -type ActionSendAKSInitData struct { -} - -type ActionCheckNodeDeleted struct { - NodeName string `json:"nodeName"` - NodeID string `json:"nodeId"` -} - -type ActionCheckNodeStatus_Status string - -const ( - ActionCheckNodeStatus_READY ActionCheckNodeStatus_Status = "NodeStatus_READY" - ActionCheckNodeStatus_DELETED ActionCheckNodeStatus_Status = "NodeStatus_DELETED" -) - -type ActionCheckNodeStatus struct { - NodeName string `json:"nodeName"` - NodeID string `json:"nodeId"` - NodeStatus ActionCheckNodeStatus_Status `json:"nodeStatus,omitempty"` - WaitTimeoutSeconds *int32 `json:"waitTimeoutSeconds,omitempty"` -} - -type ActionChartUpsert struct { - Namespace string `json:"namespace"` - ReleaseName string 
`json:"releaseName"` - ValuesOverrides map[string]string `json:"valuesOverrides,omitempty"` - ChartSource ChartSource `json:"chartSource"` - CreateNamespace bool `json:"createNamespace"` -} - -type ActionChartUninstall struct { - Namespace string `json:"namespace"` - ReleaseName string `json:"releaseName"` -} - -type ActionChartRollback struct { - Namespace string `json:"namespace"` - ReleaseName string `json:"releaseName"` - Version string `json:"version"` -} - -type ChartSource struct { - RepoURL string `json:"repoUrl"` - Name string `json:"name"` - Version string `json:"version"` -} - -func (c *ChartSource) Validate() error { - if c.Name == "" { - return errors.New("chart name is not set") - } - if c.RepoURL == "" { - return errors.New("chart repoURL is not set") - } - if c.Version == "" { - return errors.New("chart version is not set") - } - return nil -} - -type AKSInitDataRequest struct { +type aksInitDataRequest struct { CloudConfigBase64 string `json:"cloudConfigBase64"` ProtectedSettingsBase64 string `json:"protectedSettingsBase64"` Architecture string `json:"architecture"` diff --git a/helm/chart_loader.go b/helm/chart_loader.go index 172f9d24..9849a946 100644 --- a/helm/chart_loader.go +++ b/helm/chart_loader.go @@ -17,7 +17,7 @@ import ( "helm.sh/helm/v3/pkg/getter" "helm.sh/helm/v3/pkg/repo" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" "github.com/castai/cluster-controller/waitext" ) @@ -26,7 +26,7 @@ const ( ) type ChartLoader interface { - Load(ctx context.Context, c *castai.ChartSource) (*chart.Chart, error) + Load(ctx context.Context, c *types.ChartSource) (*chart.Chart, error) } func NewChartLoader(log logrus.FieldLogger) ChartLoader { @@ -38,7 +38,7 @@ type remoteChartLoader struct { log logrus.FieldLogger } -func (cl *remoteChartLoader) Load(ctx context.Context, c *castai.ChartSource) (*chart.Chart, error) { +func (cl *remoteChartLoader) Load(ctx context.Context, c *types.ChartSource) 
(*chart.Chart, error) { var res *chart.Chart err := waitext.Retry( diff --git a/helm/chart_loader_test.go b/helm/chart_loader_test.go index b21e9b55..c5791c5d 100644 --- a/helm/chart_loader_test.go +++ b/helm/chart_loader_test.go @@ -8,7 +8,7 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestIntegration_ChartLoader(t *testing.T) { @@ -16,7 +16,7 @@ func TestIntegration_ChartLoader(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - chart := &castai.ChartSource{ + chart := &types.ChartSource{ RepoURL: "https://castai.github.io/helm-charts", Name: "castai-cluster-controller", Version: "0.4.3", diff --git a/helm/client.go b/helm/client.go index f373e197..b36987e2 100644 --- a/helm/client.go +++ b/helm/client.go @@ -20,12 +20,12 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" - "github.com/castai/cluster-controller/castai" "github.com/castai/cluster-controller/helm/hook" + "github.com/castai/cluster-controller/types" ) type InstallOptions struct { - ChartSource *castai.ChartSource + ChartSource *types.ChartSource Namespace string CreateNamespace bool ReleaseName string @@ -38,7 +38,7 @@ type UninstallOptions struct { } type UpgradeOptions struct { - ChartSource *castai.ChartSource + ChartSource *types.ChartSource Release *release.Release ValuesOverrides map[string]string MaxHistory int diff --git a/helm/client_test.go b/helm/client_test.go index 4e9ef535..0b4350ed 100644 --- a/helm/client_test.go +++ b/helm/client_test.go @@ -16,7 +16,7 @@ import ( "helm.sh/helm/v3/pkg/storage/driver" "helm.sh/helm/v3/pkg/time" - "github.com/castai/cluster-controller/castai" + "github.com/castai/cluster-controller/types" ) func TestClientInstall(t *testing.T) { @@ -124,7 +124,7 @@ type testChartLoader struct { chart *chart.Chart } -func (t *testChartLoader) 
Load(_ context.Context, _ *castai.ChartSource) (*chart.Chart, error) { +func (t *testChartLoader) Load(_ context.Context, _ *types.ChartSource) (*chart.Chart, error) { return t.chart, nil } diff --git a/helm/mock/chart_loader.go b/helm/mock/chart_loader.go index 4cf60f98..38f4c9a3 100644 --- a/helm/mock/chart_loader.go +++ b/helm/mock/chart_loader.go @@ -8,7 +8,7 @@ import ( context "context" reflect "reflect" - castai "github.com/castai/cluster-controller/castai" + types "github.com/castai/cluster-controller/types" gomock "github.com/golang/mock/gomock" chart "helm.sh/helm/v3/pkg/chart" ) @@ -37,7 +37,7 @@ func (m *MockChartLoader) EXPECT() *MockChartLoaderMockRecorder { } // Load mocks base method. -func (m *MockChartLoader) Load(arg0 context.Context, arg1 *castai.ChartSource) (*chart.Chart, error) { +func (m *MockChartLoader) Load(arg0 context.Context, arg1 *types.ChartSource) (*chart.Chart, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load", arg0, arg1) ret0, _ := ret[0].(*chart.Chart) diff --git a/main.go b/main.go index 70ccb5d1..b9b52663 100644 --- a/main.go +++ b/main.go @@ -90,7 +90,7 @@ func main() { func run( ctx context.Context, - client castai.ActionsClient, + client actions.Client, logger *logrus.Logger, cfg config.Config, binVersion *config.ClusterControllerVersion, diff --git a/types/types.go b/types/types.go new file mode 100644 index 00000000..ca19ede1 --- /dev/null +++ b/types/types.go @@ -0,0 +1,220 @@ +// Package types contains common type definitions used by multiple packages.
+package types + +import ( + "errors" + "fmt" + "time" + + v1 "k8s.io/api/core/v1" +) + +type ClusterAction struct { + ID string `json:"id"` + ActionDeleteNode *ActionDeleteNode `json:"actionDeleteNode,omitempty"` + ActionDrainNode *ActionDrainNode `json:"actionDrainNode,omitempty"` + ActionPatchNode *ActionPatchNode `json:"actionPatchNode,omitempty"` + ActionCreateEvent *ActionCreateEvent `json:"actionCreateEvent,omitempty"` + ActionApproveCSR *ActionApproveCSR `json:"actionApproveCsr,omitempty"` + ActionChartUpsert *ActionChartUpsert `json:"actionChartUpsert,omitempty"` + ActionChartUninstall *ActionChartUninstall `json:"actionChartUninstall,omitempty"` + ActionChartRollback *ActionChartRollback `json:"actionChartRollback,omitempty"` + ActionDisconnectCluster *ActionDisconnectCluster `json:"actionDisconnectCluster,omitempty"` + ActionSendAKSInitData *ActionSendAKSInitData `json:"actionSendAksInitData,omitempty"` + ActionCheckNodeDeleted *ActionCheckNodeDeleted `json:"actionCheckNodeDeleted,omitempty"` + ActionCheckNodeStatus *ActionCheckNodeStatus `json:"actionCheckNodeStatus,omitempty"` + ActionPatch *ActionPatch `json:"actionPatch,omitempty"` + ActionCreate *ActionCreate `json:"actionCreate,omitempty"` + ActionDelete *ActionDelete `json:"actionDelete,omitempty"` + CreatedAt time.Time `json:"createdAt"` + DoneAt *time.Time `json:"doneAt,omitempty"` + Error *string `json:"error,omitempty"` +} + +func (c *ClusterAction) Data() interface{} { + if c.ActionDeleteNode != nil { + return c.ActionDeleteNode + } + if c.ActionDrainNode != nil { + return c.ActionDrainNode + } + if c.ActionPatchNode != nil { + return c.ActionPatchNode + } + if c.ActionCreateEvent != nil { + return c.ActionCreateEvent + } + if c.ActionApproveCSR != nil { + return c.ActionApproveCSR + } + if c.ActionChartUpsert != nil { + return c.ActionChartUpsert + } + if c.ActionChartUninstall != nil { + return c.ActionChartUninstall + } + if c.ActionChartRollback != nil { + return c.ActionChartRollback + } 
+ if c.ActionDisconnectCluster != nil { + return c.ActionDisconnectCluster + } + if c.ActionSendAKSInitData != nil { + return c.ActionSendAKSInitData + } + if c.ActionCheckNodeDeleted != nil { + return c.ActionCheckNodeDeleted + } + if c.ActionCheckNodeStatus != nil { + return c.ActionCheckNodeStatus + } + if c.ActionPatch != nil { + return c.ActionPatch + } + if c.ActionCreate != nil { + return c.ActionCreate + } + if c.ActionDelete != nil { + return c.ActionDelete + } + return nil +} + +type GroupVersionResource struct { + Group string `json:"group"` + Version string `json:"version"` + Resource string `json:"resource"` +} + +func (r GroupVersionResource) String() string { + return fmt.Sprintf("%v/%v/%v", r.Group, r.Version, r.Resource) +} + +type ObjectID struct { + GroupVersionResource `json:",inline"` + Namespace *string `json:"namespace"` + Name string `json:"name"` +} + +type ActionPatch struct { + ID ObjectID `json:"id"` + PatchType string `json:"patchType"` + Patch string `json:"patch"` +} + +type ActionCreate struct { + GroupVersionResource `json:",inline"` + Object map[string]interface{} `json:"object,omitempty"` +} + +type ActionDelete struct { + ID ObjectID `json:"id"` +} + +type ActionDeleteNode struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` +} + +type ActionDrainNode struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` + DrainTimeoutSeconds int `json:"drainTimeoutSeconds"` + Force bool `json:"force"` +} + +type ActionApproveCSR struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` +} + +type ActionPatchNode struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` + Labels map[string]string `json:"labels"` + Taints []NodeTaint `json:"taints"` + Annotations map[string]string `json:"annotations"` + Unschedulable *bool `json:"unschedulable"` + // Capacity allows advertising extended resources for a Node. 
+ // More info: https://kubernetes.io/docs/tasks/administer-cluster/extended-resource-node/ + Capacity v1.ResourceList `json:"capacity"` +} + +type NodeTaint struct { + Effect string `json:"effect"` + Key string `json:"key"` + Value string `json:"value"` +} + +type ActionCreateEvent struct { + Reporter string `json:"reportingComponent"` + ObjectRef v1.ObjectReference `json:"objectReference"` + EventTime time.Time `json:"eventTime"` + EventType string `json:"eventType"` + Reason string `json:"reason"` + Action string `json:"action"` + Message string `json:"message"` +} + +type ActionDisconnectCluster struct { +} + +type ActionSendAKSInitData struct { +} + +type ActionCheckNodeDeleted struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` +} + +type ActionCheckNodeStatus_Status string + +const ( + ActionCheckNodeStatus_READY ActionCheckNodeStatus_Status = "NodeStatus_READY" + ActionCheckNodeStatus_DELETED ActionCheckNodeStatus_Status = "NodeStatus_DELETED" +) + +type ActionCheckNodeStatus struct { + NodeName string `json:"nodeName"` + NodeID string `json:"nodeId"` + NodeStatus ActionCheckNodeStatus_Status `json:"nodeStatus,omitempty"` + WaitTimeoutSeconds *int32 `json:"waitTimeoutSeconds,omitempty"` +} + +type ActionChartUpsert struct { + Namespace string `json:"namespace"` + ReleaseName string `json:"releaseName"` + ValuesOverrides map[string]string `json:"valuesOverrides,omitempty"` + ChartSource ChartSource `json:"chartSource"` + CreateNamespace bool `json:"createNamespace"` +} + +type ActionChartUninstall struct { + Namespace string `json:"namespace"` + ReleaseName string `json:"releaseName"` +} + +type ActionChartRollback struct { + Namespace string `json:"namespace"` + ReleaseName string `json:"releaseName"` + Version string `json:"version"` +} + +type ChartSource struct { + RepoURL string `json:"repoUrl"` + Name string `json:"name"` + Version string `json:"version"` +} + +func (c *ChartSource) Validate() error { + if c.Name == "" { + 
return errors.New("chart name is not set") + } + if c.RepoURL == "" { + return errors.New("chart repoURL is not set") + } + if c.Version == "" { + return errors.New("chart version is not set") + } + return nil +} diff --git a/version/mock/version.go b/version/mock/version.go index 77f2dd73..a0d2f903 100644 --- a/version/mock/version.go +++ b/version/mock/version.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: castai-agent/internal/services/version (interfaces: Interface) +// Source: github.com/castai/cluster-controller/version (interfaces: Interface) // Package mock_version is a generated GoMock package. package mock_version