diff --git a/cmd/vcluster/cmd/start.go b/cmd/vcluster/cmd/start.go index c83281c92f..6c25355fc8 100644 --- a/cmd/vcluster/cmd/start.go +++ b/cmd/vcluster/cmd/start.go @@ -14,6 +14,7 @@ import ( "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/setup" "github.com/loft-sh/vcluster/pkg/telemetry" + util "github.com/loft-sh/vcluster/pkg/util/context" "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" @@ -106,7 +107,7 @@ func ExecuteStart(ctx context.Context, options *StartOptions) error { } // start managers - syncers, err := setup.StartManagers(controllerCtx) + syncers, err := setup.StartManagers(util.ToRegisterContext(controllerCtx)) if err != nil { return fmt.Errorf("start managers: %w", err) } diff --git a/pkg/controllers/generic/export_syncer.go b/pkg/controllers/generic/export_syncer.go index 9c713ad27d..ddf9ba33ae 100644 --- a/pkg/controllers/generic/export_syncer.go +++ b/pkg/controllers/generic/export_syncer.go @@ -99,8 +99,7 @@ func createExporterFromConfig(ctx *synccontext.RegisterContext, config *vcluster gvk := schema.FromAPIVersionAndKind(config.APIVersion, config.Kind) controllerID := fmt.Sprintf("%s/%s/GenericExport", strings.ToLower(gvk.Kind), strings.ToLower(gvk.Group)) - - mapper, err := generic.NewNamespacedMapper(ctx, obj, translate.Default.PhysicalName) + mapper, err := generic.NewMapper(ctx, obj, translate.Default.PhysicalName) if err != nil { return nil, err } diff --git a/pkg/controllers/register.go b/pkg/controllers/register.go index f742f84b0e..5279a37a56 100644 --- a/pkg/controllers/register.go +++ b/pkg/controllers/register.go @@ -9,153 +9,24 @@ import ( "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/controllers/deploy" "github.com/loft-sh/vcluster/pkg/controllers/generic" - "github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps" - "github.com/loft-sh/vcluster/pkg/controllers/resources/csidrivers" - 
"github.com/loft-sh/vcluster/pkg/controllers/resources/csinodes" - "github.com/loft-sh/vcluster/pkg/controllers/resources/csistoragecapacities" - "github.com/loft-sh/vcluster/pkg/controllers/resources/endpoints" - "github.com/loft-sh/vcluster/pkg/controllers/resources/events" - "github.com/loft-sh/vcluster/pkg/controllers/resources/ingressclasses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/ingresses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/namespaces" - "github.com/loft-sh/vcluster/pkg/controllers/resources/networkpolicies" - "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" - "github.com/loft-sh/vcluster/pkg/controllers/resources/persistentvolumeclaims" - "github.com/loft-sh/vcluster/pkg/controllers/resources/persistentvolumes" - "github.com/loft-sh/vcluster/pkg/controllers/resources/poddisruptionbudgets" - "github.com/loft-sh/vcluster/pkg/controllers/resources/pods" - "github.com/loft-sh/vcluster/pkg/controllers/resources/priorityclasses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/secrets" - "github.com/loft-sh/vcluster/pkg/controllers/resources/serviceaccounts" - "github.com/loft-sh/vcluster/pkg/controllers/resources/storageclasses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents" - "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshots" "github.com/loft-sh/vcluster/pkg/controllers/servicesync" "github.com/loft-sh/vcluster/pkg/controllers/syncer" - synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" util "github.com/loft-sh/vcluster/pkg/util/context" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" metricsserver 
"sigs.k8s.io/controller-runtime/pkg/metrics/server" "github.com/loft-sh/vcluster/pkg/controllers/coredns" "github.com/loft-sh/vcluster/pkg/controllers/k8sdefaultendpoint" "github.com/loft-sh/vcluster/pkg/controllers/podsecurity" - "github.com/loft-sh/vcluster/pkg/controllers/resources/services" syncertypes "github.com/loft-sh/vcluster/pkg/types" "github.com/loft-sh/vcluster/pkg/util/loghelper" "github.com/pkg/errors" ) -// ExtraControllers that will be started as well -var ExtraControllers []BuildController - -// BuildController is a function to build a new syncer -type BuildController func(ctx *synccontext.RegisterContext) (syncertypes.Object, error) - -func getSyncers(ctx *config.ControllerContext) []BuildController { - return append([]BuildController{ - isEnabled(ctx.Config.Sync.ToHost.Services.Enabled, services.New), - isEnabled(ctx.Config.Sync.ToHost.ConfigMaps.Enabled, configmaps.New), - isEnabled(ctx.Config.Sync.ToHost.Secrets.Enabled, secrets.New), - isEnabled(ctx.Config.Sync.ToHost.Endpoints.Enabled, endpoints.New), - isEnabled(ctx.Config.Sync.ToHost.Pods.Enabled, pods.New), - isEnabled(ctx.Config.Sync.FromHost.Events.Enabled, events.New), - isEnabled(ctx.Config.Sync.ToHost.PersistentVolumeClaims.Enabled, persistentvolumeclaims.New), - isEnabled(ctx.Config.Sync.ToHost.Ingresses.Enabled, ingresses.New), - isEnabled(ctx.Config.Sync.FromHost.IngressClasses.Enabled, ingressclasses.New), - isEnabled(ctx.Config.Sync.ToHost.StorageClasses.Enabled, storageclasses.New), - isEnabled(ctx.Config.Sync.FromHost.StorageClasses.Enabled == "true", storageclasses.NewHostStorageClassSyncer), - isEnabled(ctx.Config.Sync.ToHost.PriorityClasses.Enabled, priorityclasses.New), - isEnabled(ctx.Config.Sync.ToHost.PodDisruptionBudgets.Enabled, poddisruptionbudgets.New), - isEnabled(ctx.Config.Sync.ToHost.NetworkPolicies.Enabled, networkpolicies.New), - isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshotclasses.New), - 
isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshots.New), - isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshotcontents.New), - isEnabled(ctx.Config.Sync.ToHost.ServiceAccounts.Enabled, serviceaccounts.New), - isEnabled(ctx.Config.Sync.FromHost.CSINodes.Enabled == "true", csinodes.New), - isEnabled(ctx.Config.Sync.FromHost.CSIDrivers.Enabled == "true", csidrivers.New), - isEnabled(ctx.Config.Sync.FromHost.CSIStorageCapacities.Enabled == "true", csistoragecapacities.New), - isEnabled(ctx.Config.Experimental.MultiNamespaceMode.Enabled, namespaces.New), - persistentvolumes.New, - nodes.New, - }, ExtraControllers...) -} - -func isEnabled(enabled bool, fn BuildController) BuildController { - if enabled { - return fn - } - return nil -} - -func CreateSyncers(ctx *config.ControllerContext) ([]syncertypes.Object, error) { - registerContext := util.ToRegisterContext(ctx) - - // register controllers for resource synchronization - syncers := []syncertypes.Object{} - for _, newSyncer := range getSyncers(ctx) { - if newSyncer == nil { - continue - } - - createdController, err := newSyncer(registerContext) - - name := "" - if createdController != nil { - name = createdController.Name() - } - - if err != nil { - return nil, fmt.Errorf("register %s controller: %w", name, err) - } - - loghelper.Infof("Start %s sync controller", name) - syncers = append(syncers, createdController) - } - - return syncers, nil -} - -func ExecuteInitializers(controllerCtx *config.ControllerContext, syncers []syncertypes.Object) error { - registerContext := util.ToRegisterContext(controllerCtx) - - // execute the syncer init functions - for _, s := range syncers { - name := s.Name() - initializer, ok := s.(syncertypes.Initializer) - if ok { - klog.FromContext(controllerCtx.Context).V(1).Info("Execute syncer init", "syncer", name) - err := initializer.Init(registerContext) - if err != nil { - return errors.Wrapf(err, "ensure prerequisites for %s syncer", name) - 
} - } - } - - return nil -} - -func RegisterIndices(ctx *config.ControllerContext, syncers []syncertypes.Object) error { - registerContext := util.ToRegisterContext(ctx) - for _, s := range syncers { - indexRegisterer, ok := s.(syncertypes.IndicesRegisterer) - if ok { - err := indexRegisterer.RegisterIndices(registerContext) - if err != nil { - return errors.Wrapf(err, "register indices for %s syncer", s.Name()) - } - } - } - - return nil -} - func RegisterControllers(ctx *config.ControllerContext, syncers []syncertypes.Object) error { registerContext := util.ToRegisterContext(ctx) @@ -167,14 +38,14 @@ func RegisterControllers(ctx *config.ControllerContext, syncers []syncertypes.Ob // register controller that maintains pod security standard check if ctx.Config.Policies.PodSecurityStandard != "" { - err := RegisterPodSecurityController(ctx) + err := registerPodSecurityController(ctx) if err != nil { return err } } // register controller that keeps CoreDNS NodeHosts config up to date - err = RegisterCoreDNSController(ctx) + err = registerCoreDNSController(ctx) if err != nil { return err } @@ -186,13 +57,13 @@ func RegisterControllers(ctx *config.ControllerContext, syncers []syncertypes.Ob } // register service syncer to map services between host and virtual cluster - err = RegisterServiceSyncControllers(ctx) + err = registerServiceSyncControllers(ctx) if err != nil { return err } // register generic sync controllers - err = RegisterGenericSyncController(ctx) + err = registerGenericSyncController(ctx) if err != nil { return err } @@ -230,7 +101,7 @@ func RegisterControllers(ctx *config.ControllerContext, syncers []syncertypes.Ob return nil } -func RegisterGenericSyncController(ctx *config.ControllerContext) error { +func registerGenericSyncController(ctx *config.ControllerContext) error { err := generic.CreateExporters(ctx) if err != nil { return err @@ -244,7 +115,7 @@ func RegisterGenericSyncController(ctx *config.ControllerContext) error { return nil } -func 
RegisterServiceSyncControllers(ctx *config.ControllerContext) error { +func registerServiceSyncControllers(ctx *config.ControllerContext) error { hostNamespace := ctx.Config.WorkloadTargetNamespace if ctx.Config.Experimental.MultiNamespaceMode.Enabled { hostNamespace = ctx.Config.WorkloadNamespace @@ -371,7 +242,7 @@ func parseMapping(mappings []vclusterconfig.ServiceMapping, fromDefaultNamespace return ret, nil } -func RegisterCoreDNSController(ctx *config.ControllerContext) error { +func registerCoreDNSController(ctx *config.ControllerContext) error { controller := &coredns.NodeHostsReconciler{ Client: ctx.VirtualManager.GetClient(), Log: loghelper.New("corednsnodehosts-controller"), @@ -383,7 +254,7 @@ func RegisterCoreDNSController(ctx *config.ControllerContext) error { return nil } -func RegisterPodSecurityController(ctx *config.ControllerContext) error { +func registerPodSecurityController(ctx *config.ControllerContext) error { controller := &podsecurity.Reconciler{ Client: ctx.VirtualManager.GetClient(), PodSecurityStandard: ctx.Config.Policies.PodSecurityStandard, diff --git a/pkg/controllers/resources/configmaps/syncer.go b/pkg/controllers/resources/configmaps/syncer.go index f713fe1c7b..6035b72afd 100644 --- a/pkg/controllers/resources/configmaps/syncer.go +++ b/pkg/controllers/resources/configmaps/syncer.go @@ -25,16 +25,14 @@ func New(ctx *synccontext.RegisterContext) (syncer.Object, error) { return &configMapSyncer{ NamespacedTranslator: translator.NewNamespacedTranslator(ctx, "configmap", &corev1.ConfigMap{}, mappings.ConfigMaps()), - syncAllConfigMaps: ctx.Config.Sync.ToHost.ConfigMaps.All, - multiNamespaceMode: ctx.Config.Experimental.MultiNamespaceMode.Enabled, + syncAllConfigMaps: ctx.Config.Sync.ToHost.ConfigMaps.All, }, nil } type configMapSyncer struct { translator.NamespacedTranslator - syncAllConfigMaps bool - multiNamespaceMode bool + syncAllConfigMaps bool } var _ syncer.IndicesRegisterer = &configMapSyncer{} diff --git 
a/pkg/controllers/resources/csistoragecapacities/syncer_test.go b/pkg/controllers/resources/csistoragecapacities/syncer_test.go index 508602d326..1d7136608c 100644 --- a/pkg/controllers/resources/csistoragecapacities/syncer_test.go +++ b/pkg/controllers/resources/csistoragecapacities/syncer_test.go @@ -3,6 +3,8 @@ package csistoragecapacities import ( "testing" + "github.com/loft-sh/vcluster/pkg/config" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "github.com/loft-sh/vcluster/pkg/controllers/resources/storageclasses" @@ -92,7 +94,10 @@ func TestSyncHostStorageClass(t *testing.T) { MaximumVolumeSize: resource.NewQuantity(202, resource.BinarySI), } - generictesting.RunTests(t, []*generictesting.SyncTest{ + generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.FromHost.CSIStorageCapacities.Enabled = "true" + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Sync Up", InitialVirtualState: []runtime.Object{}, @@ -232,7 +237,10 @@ func TestSyncStorageClass(t *testing.T) { MaximumVolumeSize: resource.NewQuantity(202, resource.BinarySI), } - generictesting.RunTests(t, []*generictesting.SyncTest{ + generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.FromHost.CSIStorageCapacities.Enabled = "true" + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Sync Up", InitialVirtualState: []runtime.Object{vSCa, vSCb, labelledNode}, @@ -243,9 +251,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ 
storagev1.SchemeGroupVersion.WithKind(kind): {pObj}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -268,9 +278,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ storagev1.SchemeGroupVersion.WithKind(kind): {pObj}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -293,9 +305,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ storagev1.SchemeGroupVersion.WithKind(kind): {pObj}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -313,9 +327,11 @@ func TestSyncStorageClass(t *testing.T) { InitialVirtualState: []runtime.Object{vObj, vSCa, vSCb, 
labelledNode}, ExpectedVirtualState: map[schema.GroupVersionKind][]runtime.Object{}, ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{}, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -338,9 +354,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ storagev1.SchemeGroupVersion.WithKind(kind): {pObjUpdated}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -363,9 +381,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ storagev1.SchemeGroupVersion.WithKind(kind): {pObj}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) @@ -388,9 
+408,11 @@ func TestSyncStorageClass(t *testing.T) { ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ storagev1.SchemeGroupVersion.WithKind(kind): {pObj}, }, + AdjustConfig: func(vConfig *config.VirtualClusterConfig) { + vConfig.Sync.FromHost.StorageClasses.Enabled = "false" + vConfig.Sync.ToHost.StorageClasses.Enabled = true + }, Sync: func(ctx *synccontext.RegisterContext) { - ctx.Config.Sync.FromHost.StorageClasses.Enabled = "false" - ctx.Config.Sync.ToHost.StorageClasses.Enabled = true var err error syncCtx, sync := generictesting.FakeStartSyncer(t, ctx, storageclasses.New) _, err = sync.(syncer.Syncer).SyncToHost(syncCtx, vSCa) diff --git a/pkg/controllers/resources/endpoints/translate.go b/pkg/controllers/resources/endpoints/translate.go index 14be4d5e42..890a7e19c2 100644 --- a/pkg/controllers/resources/endpoints/translate.go +++ b/pkg/controllers/resources/endpoints/translate.go @@ -22,7 +22,7 @@ func (s *endpointsSyncer) translate(ctx context.Context, vObj client.Object) *co return endpoints } -func (s *endpointsSyncer) translateSpec(endpoints *corev1.Endpoints) error { +func (s *endpointsSyncer) translateSpec(endpoints *corev1.Endpoints) { // translate the addresses for i, subset := range endpoints.Subsets { for j, addr := range subset.Addresses { @@ -48,17 +48,12 @@ func (s *endpointsSyncer) translateSpec(endpoints *corev1.Endpoints) error { } } } - - return nil } func (s *endpointsSyncer) translateUpdate(ctx context.Context, pObj, vObj *corev1.Endpoints) error { // check subsets translated := vObj.DeepCopy() - err := s.translateSpec(translated) - if err != nil { - return err - } + s.translateSpec(translated) if !equality.Semantic.DeepEqual(translated.Subsets, pObj.Subsets) { pObj.Subsets = translated.Subsets } diff --git a/pkg/controllers/resources/ingresses/syncer.go b/pkg/controllers/resources/ingresses/syncer.go index 3c618d542c..6827c0e083 100644 --- a/pkg/controllers/resources/ingresses/syncer.go +++ 
b/pkg/controllers/resources/ingresses/syncer.go @@ -51,6 +51,9 @@ func (s *ingressSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, v if err := patch.Patch(ctx, pObj, vObj); err != nil { retErr = utilerrors.NewAggregate([]error{retErr, err}) } + if retErr != nil { + s.NamespacedTranslator.EventRecorder().Eventf(pObj, "Warning", "SyncError", "Error syncing: %v", retErr) + } }() pIngress, vIngress, source, target := synccontext.Cast[*networkingv1.Ingress](ctx, pObj, vObj) @@ -95,18 +98,13 @@ func translateIngressAnnotations(annotations map[string]string, ingressNamespace if len(splitted) == 1 { // If value is only "secret" secret := splitted[0] foundSecrets = append(foundSecrets, ingressNamespace+"/"+secret) - pName, err := mappings.VirtualToHostName(secret, ingressNamespace, mappings.Secrets()) - if err == nil { - newAnnotations[k] = pName - } + newAnnotations[k] = mappings.VirtualToHostName(secret, ingressNamespace, mappings.Secrets()) } else if len(splitted) == 2 { // If value is "namespace/secret" namespace := splitted[0] secret := splitted[1] foundSecrets = append(foundSecrets, namespace+"/"+secret) - pName, err := mappings.VirtualToHost(secret, namespace, mappings.Secrets()) - if err == nil { - newAnnotations[k] = pName.Namespace + "/" + pName.Name - } + pName := mappings.VirtualToHost(secret, namespace, mappings.Secrets()) + newAnnotations[k] = pName.Namespace + "/" + pName.Name } else { newAnnotations[k] = v } diff --git a/pkg/controllers/resources/pods/translate.go b/pkg/controllers/resources/pods/translate.go index f10932d7fd..dc6a531068 100644 --- a/pkg/controllers/resources/pods/translate.go +++ b/pkg/controllers/resources/pods/translate.go @@ -88,10 +88,7 @@ func (s *podSyncer) findKubernetesDNSIP(ctx *synccontext.SyncContext) (string, e } // translate service name - pService, err := mappings.VirtualToHostName(specialservices.DefaultKubeDNSServiceName, specialservices.DefaultKubeDNSServiceNamespace, mappings.Services()) - if err != nil { - 
return "", err - } + pService := mappings.VirtualToHostName(specialservices.DefaultKubeDNSServiceName, specialservices.DefaultKubeDNSServiceNamespace, mappings.Services()) // first try to find the actual synced service, then fallback to a different if we have a suffix (only in the case of integrated coredns) pClient, namespace := specialservices.Default.DNSNamespace(ctx) diff --git a/pkg/controllers/resources/pods/translate/sa_token_secret.go b/pkg/controllers/resources/pods/translate/sa_token_secret.go index d9a30ab5e4..c06d59358c 100644 --- a/pkg/controllers/resources/pods/translate/sa_token_secret.go +++ b/pkg/controllers/resources/pods/translate/sa_token_secret.go @@ -65,8 +65,6 @@ func SATokenSecret(ctx context.Context, pClient client.Client, vPod *corev1.Pod, if err != nil && !kerrors.IsNotFound(err) { return err } - - existingSecret = nil } // create to secret with the given token diff --git a/pkg/controllers/resources/pods/translate/translator.go b/pkg/controllers/resources/pods/translate/translator.go index bdfec3e36a..0bc8ad02bc 100644 --- a/pkg/controllers/resources/pods/translate/translator.go +++ b/pkg/controllers/resources/pods/translate/translator.go @@ -159,7 +159,7 @@ func (t *translator) Translate(ctx context.Context, vPod *corev1.Pod, services [ // convert to core object pPod := translate.Default.ApplyMetadata( vPod, - mappings.Pods().VirtualToHost(ctx, mappings.NamespacedName(vPod), vPod), + mappings.VirtualToHost(vPod.Name, vPod.Namespace, mappings.Pods()), t.syncedLabels, ).(*corev1.Pod) @@ -427,7 +427,6 @@ func (t *translator) translateVolumes(ctx context.Context, pPod *corev1.Pod, vPo pPod.Spec.Volumes[i].RBD.SecretRef.Name = mappings.VirtualToHostName(pPod.Spec.Volumes[i].RBD.SecretRef.Name, vPod.Namespace, mappings.Secrets()) } if pPod.Spec.Volumes[i].FlexVolume != nil && pPod.Spec.Volumes[i].FlexVolume.SecretRef != nil { - pPod.Spec.Volumes[i].FlexVolume.SecretRef.Name = 
mappings.VirtualToHostName(pPod.Spec.Volumes[i].FlexVolume.SecretRef.Name, vPod.Namespace, mappings.Secrets()) } if pPod.Spec.Volumes[i].Cinder != nil && pPod.Spec.Volumes[i].Cinder.SecretRef != nil { diff --git a/pkg/controllers/resources/pods/translate/translator_test.go b/pkg/controllers/resources/pods/translate/translator_test.go index 626dc54ab2..f62fe00f99 100644 --- a/pkg/controllers/resources/pods/translate/translator_test.go +++ b/pkg/controllers/resources/pods/translate/translator_test.go @@ -4,14 +4,17 @@ import ( "context" "testing" + generictesting "github.com/loft-sh/vcluster/pkg/controllers/syncer/testing" + "github.com/loft-sh/vcluster/pkg/mappings/resources" + "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/util/loghelper" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "gotest.tools/v3/assert" "gotest.tools/v3/assert/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestPodAffinityTermsTranslation(t *testing.T) { @@ -210,10 +213,13 @@ func TestVolumeTranslation(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { fakeRecorder := record.NewFakeRecorder(10) + pClient := testingutil.NewFakeClient(scheme.Scheme) + vClient := testingutil.NewFakeClient(scheme.Scheme) + resources.MustRegisterMappings(generictesting.NewFakeRegisterContext(generictesting.NewFakeConfig(), pClient, vClient)) tr := &translator{ eventRecorder: fakeRecorder, log: loghelper.New("pods-syncer-translator-test"), - pClient: fake.NewClientBuilder().Build(), + pClient: pClient, } pPod := testCase.vPod.DeepCopy() diff --git a/pkg/controllers/resources/pods/util.go b/pkg/controllers/resources/pods/util.go index dde9330522..b59325a0f5 100644 --- a/pkg/controllers/resources/pods/util.go +++ b/pkg/controllers/resources/pods/util.go @@ -41,10 
+41,7 @@ func SecretNamesFromVolumes(pod *corev1.Pod) []string { // check if projected volume source is a serviceaccount and in such a case // we re-write it as a secret too, handle accordingly if pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken != nil { - pSecret, err := podtranslate.SecretNameFromPodName(pod.Name, pod.Namespace) - if err == nil { - secrets = append(secrets, pod.Namespace+"/"+pSecret) - } + secrets = append(secrets, pod.Namespace+"/"+podtranslate.SecretNameFromPodName(pod.Name, pod.Namespace)) } } } diff --git a/pkg/controllers/resources/register.go b/pkg/controllers/resources/register.go new file mode 100644 index 0000000000..be594b90ca --- /dev/null +++ b/pkg/controllers/resources/register.go @@ -0,0 +1,125 @@ +package resources + +import ( + "fmt" + + "github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps" + "github.com/loft-sh/vcluster/pkg/controllers/resources/csidrivers" + "github.com/loft-sh/vcluster/pkg/controllers/resources/csinodes" + "github.com/loft-sh/vcluster/pkg/controllers/resources/csistoragecapacities" + "github.com/loft-sh/vcluster/pkg/controllers/resources/endpoints" + "github.com/loft-sh/vcluster/pkg/controllers/resources/events" + "github.com/loft-sh/vcluster/pkg/controllers/resources/ingressclasses" + "github.com/loft-sh/vcluster/pkg/controllers/resources/ingresses" + "github.com/loft-sh/vcluster/pkg/controllers/resources/namespaces" + "github.com/loft-sh/vcluster/pkg/controllers/resources/networkpolicies" + "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" + "github.com/loft-sh/vcluster/pkg/controllers/resources/persistentvolumeclaims" + "github.com/loft-sh/vcluster/pkg/controllers/resources/persistentvolumes" + "github.com/loft-sh/vcluster/pkg/controllers/resources/poddisruptionbudgets" + "github.com/loft-sh/vcluster/pkg/controllers/resources/pods" + "github.com/loft-sh/vcluster/pkg/controllers/resources/priorityclasses" + "github.com/loft-sh/vcluster/pkg/controllers/resources/secrets" + 
"github.com/loft-sh/vcluster/pkg/controllers/resources/serviceaccounts" + "github.com/loft-sh/vcluster/pkg/controllers/resources/services" + "github.com/loft-sh/vcluster/pkg/controllers/resources/storageclasses" + "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses" + "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents" + "github.com/loft-sh/vcluster/pkg/controllers/resources/volumesnapshots/volumesnapshots" + synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + syncertypes "github.com/loft-sh/vcluster/pkg/types" + "github.com/loft-sh/vcluster/pkg/util/loghelper" + "github.com/pkg/errors" + "k8s.io/klog/v2" +) + +// ExtraControllers that will be started as well +var ExtraControllers []BuildController + +// BuildController is a function to build a new syncer +type BuildController func(ctx *synccontext.RegisterContext) (syncertypes.Object, error) + +// getSyncers retrieves all syncers that should get created +func getSyncers(ctx *synccontext.RegisterContext) []BuildController { + return append([]BuildController{ + isEnabled(ctx.Config.Sync.ToHost.Services.Enabled, services.New), + isEnabled(ctx.Config.Sync.ToHost.ConfigMaps.Enabled, configmaps.New), + isEnabled(ctx.Config.Sync.ToHost.Secrets.Enabled, secrets.New), + isEnabled(ctx.Config.Sync.ToHost.Endpoints.Enabled, endpoints.New), + isEnabled(ctx.Config.Sync.ToHost.Pods.Enabled, pods.New), + isEnabled(ctx.Config.Sync.FromHost.Events.Enabled, events.New), + isEnabled(ctx.Config.Sync.ToHost.PersistentVolumeClaims.Enabled, persistentvolumeclaims.New), + isEnabled(ctx.Config.Sync.ToHost.Ingresses.Enabled, ingresses.New), + isEnabled(ctx.Config.Sync.FromHost.IngressClasses.Enabled, ingressclasses.New), + isEnabled(ctx.Config.Sync.ToHost.StorageClasses.Enabled, storageclasses.New), + isEnabled(ctx.Config.Sync.FromHost.StorageClasses.Enabled == "true", storageclasses.NewHostStorageClassSyncer), + 
isEnabled(ctx.Config.Sync.ToHost.PriorityClasses.Enabled, priorityclasses.New), + isEnabled(ctx.Config.Sync.ToHost.PodDisruptionBudgets.Enabled, poddisruptionbudgets.New), + isEnabled(ctx.Config.Sync.ToHost.NetworkPolicies.Enabled, networkpolicies.New), + isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshotclasses.New), + isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshots.New), + isEnabled(ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled, volumesnapshotcontents.New), + isEnabled(ctx.Config.Sync.ToHost.ServiceAccounts.Enabled, serviceaccounts.New), + isEnabled(ctx.Config.Sync.FromHost.CSINodes.Enabled == "true", csinodes.New), + isEnabled(ctx.Config.Sync.FromHost.CSIDrivers.Enabled == "true", csidrivers.New), + isEnabled(ctx.Config.Sync.FromHost.CSIStorageCapacities.Enabled == "true", csistoragecapacities.New), + isEnabled(ctx.Config.Experimental.MultiNamespaceMode.Enabled, namespaces.New), + persistentvolumes.New, + nodes.New, + }, ExtraControllers...) 
+} + +// BuildSyncers builds the syncers +func BuildSyncers(ctx *synccontext.RegisterContext) ([]syncertypes.Object, error) { + // register controllers for resource synchronization + syncers := []syncertypes.Object{} + for _, newSyncer := range getSyncers(ctx) { + if newSyncer == nil { + continue + } + + syncer, err := newSyncer(ctx) + + name := "" + if syncer != nil { + name = syncer.Name() + } + + if err != nil { + return nil, fmt.Errorf("register %s controller: %w", name, err) + } + + loghelper.Infof("Created %s syncer", name) + + // execute initializer + initializer, ok := syncer.(syncertypes.Initializer) + if ok { + klog.FromContext(ctx.Context).V(1).Info("Execute syncer init", "syncer", name) + err := initializer.Init(ctx) + if err != nil { + return nil, errors.Wrapf(err, "ensure prerequisites for %s syncer", name) + } + } + + // execute register indices + indexRegisterer, ok := syncer.(syncertypes.IndicesRegisterer) + if ok { + err := indexRegisterer.RegisterIndices(ctx) + if err != nil { + return nil, errors.Wrapf(err, "register indices for %s syncer", name) + } + } + + syncers = append(syncers, syncer) + } + + return syncers, nil +} + +func isEnabled[T any](enabled bool, fn T) T { + if enabled { + return fn + } + var ret T + return ret +} diff --git a/pkg/controllers/resources/storageclasses/syncer_test.go b/pkg/controllers/resources/storageclasses/syncer_test.go index d5a62d81e9..f25aa05cc3 100644 --- a/pkg/controllers/resources/storageclasses/syncer_test.go +++ b/pkg/controllers/resources/storageclasses/syncer_test.go @@ -3,7 +3,9 @@ package storageclasses import ( "testing" + "github.com/loft-sh/vcluster/pkg/config" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "gotest.tools/assert" @@ -63,7 +65,10 @@ func TestSync(t *testing.T) { }, } - generictesting.RunTests(t, []*generictesting.SyncTest{ + 
generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.StorageClasses.Enabled = true + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Sync Down", InitialVirtualState: []runtime.Object{vObject}, diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses/syncer_test.go b/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses/syncer_test.go index 4366d35fff..b033267d89 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshotclasses/syncer_test.go @@ -3,7 +3,9 @@ package volumesnapshotclasses import ( "testing" + "github.com/loft-sh/vcluster/pkg/config" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "gotest.tools/assert" @@ -31,7 +33,10 @@ func TestSync(t *testing.T) { vMoreParamsVSC := vBaseVSC.DeepCopy() vMoreParamsVSC.Parameters["additional"] = "param" - generictesting.RunTests(t, []*generictesting.SyncTest{ + generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Create backward", InitialVirtualState: []runtime.Object{}, diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go index 9eb1b7a9cb..3c4659a8bf 100644 --- 
a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go @@ -4,8 +4,10 @@ import ( "testing" "time" + "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/constants" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "gotest.tools/assert" "k8s.io/utils/ptr" @@ -158,7 +160,10 @@ func TestSync(t *testing.T) { vDeletingWithStatus := vDeletingWithOneFinalizer.DeepCopy() vDeletingWithStatus.Status = pDeletingWithStatus.Status - generictesting.RunTests(t, []*generictesting.SyncTest{ + generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Create dynamic VolumeSnapshotContent from host", InitialVirtualState: []runtime.Object{vVolumeSnapshot.DeepCopy()}, diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go b/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go index 9aeb7ac747..705288afa3 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go @@ -4,7 +4,9 @@ import ( "testing" "time" + "github.com/loft-sh/vcluster/pkg/config" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "gotest.tools/assert" "k8s.io/utils/ptr" @@ -101,7 +103,10 @@ func TestSync(t *testing.T) { vWithStatus := vPVSourceSnapshot.DeepCopy() vWithStatus.Status = pWithStatus.Status - generictesting.RunTests(t, []*generictesting.SyncTest{ + 
generictesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + return generictesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*generictesting.SyncTest{ { Name: "Create with PersistentVolume source", InitialVirtualState: []runtime.Object{vPVSourceSnapshot.DeepCopy()}, diff --git a/pkg/controllers/syncer/syncer_test.go b/pkg/controllers/syncer/syncer_test.go index 7fd8a53f90..cfc92e466f 100644 --- a/pkg/controllers/syncer/syncer_test.go +++ b/pkg/controllers/syncer/syncer_test.go @@ -6,7 +6,8 @@ import ( "sort" "testing" - "github.com/loft-sh/vcluster/pkg/mappings/registermappings" + "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/resources" "github.com/loft-sh/vcluster/pkg/scheme" testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" @@ -34,7 +35,7 @@ type mockSyncer struct { func NewMockSyncer(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { return &mockSyncer{ - NamespacedTranslator: translator.NewNamespacedTranslator(ctx, "secrets", &corev1.Secret{}), + NamespacedTranslator: translator.NewNamespacedTranslator(ctx, "secrets", &corev1.Secret{}, mappings.Secrets()), }, nil } @@ -158,6 +159,7 @@ func TestReconcile(t *testing.T) { translate.NameAnnotation: "a", translate.NamespaceAnnotation: namespaceInVclusterA, translate.UIDAnnotation: "123", + translate.KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), }, Labels: map[string]string{ translate.NamespaceLabel: namespaceInVclusterA, @@ -271,7 +273,7 @@ func TestReconcile(t *testing.T) { vClient := testingutil.NewFakeClient(scheme.Scheme, tc.InitialVirtualState...) 
fakeContext := generictesting.NewFakeRegisterContext(generictesting.NewFakeConfig(), pClient, vClient) - registermappings.MustRegisterMappings(fakeContext) + resources.MustRegisterMappings(fakeContext) syncerImpl, err := tc.Syncer(fakeContext) assert.NilError(t, err) @@ -283,8 +285,6 @@ func TestReconcile(t *testing.T) { vEventRecorder: &testingutil.FakeEventRecorder{}, physicalClient: pClient, - gvk: corev1.SchemeGroupVersion.WithKind("Secret"), - currentNamespace: fakeContext.CurrentNamespace, currentNamespaceClient: fakeContext.CurrentNamespaceClient, diff --git a/pkg/controllers/syncer/testing/context.go b/pkg/controllers/syncer/testing/context.go index 5a4b87773b..cdf3559f41 100644 --- a/pkg/controllers/syncer/testing/context.go +++ b/pkg/controllers/syncer/testing/context.go @@ -6,7 +6,7 @@ import ( vclusterconfig "github.com/loft-sh/vcluster/config" "github.com/loft-sh/vcluster/pkg/config" - "github.com/loft-sh/vcluster/pkg/mappings/registermappings" + "github.com/loft-sh/vcluster/pkg/mappings/resources" "github.com/loft-sh/vcluster/pkg/util/translate" "github.com/loft-sh/vcluster/pkg/util/log" @@ -60,7 +60,7 @@ func NewFakeRegisterContext(vConfig *config.VirtualClusterConfig, pClient *testi PhysicalManager: newFakeManager(pClient), } - registermappings.MustRegisterMappings(registerCtx) + resources.MustRegisterMappings(registerCtx) return registerCtx } diff --git a/pkg/controllers/syncer/translator/cluster_translator.go b/pkg/controllers/syncer/translator/cluster_translator.go index bca11c34a1..f84ef76f69 100644 --- a/pkg/controllers/syncer/translator/cluster_translator.go +++ b/pkg/controllers/syncer/translator/cluster_translator.go @@ -48,12 +48,7 @@ func (n *clusterTranslator) IsManaged(_ context2.Context, pObj client.Object) (b } func (n *clusterTranslator) TranslateMetadata(ctx context2.Context, vObj client.Object) client.Object { - nameNamespace, err := n.Mapper.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: 
vObj.GetNamespace()}, vObj) - if err != nil { - return nil - } - - pObj, err := translate.Default.SetupMetadataWithName(vObj, nameNamespace) + pObj, err := translate.Default.SetupMetadataWithName(vObj, n.Mapper.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj)) if err != nil { return nil } diff --git a/pkg/controllers/syncer/translator/namespaced_translator.go b/pkg/controllers/syncer/translator/namespaced_translator.go index 8c9843fef6..5cee85a909 100644 --- a/pkg/controllers/syncer/translator/namespaced_translator.go +++ b/pkg/controllers/syncer/translator/namespaced_translator.go @@ -95,12 +95,7 @@ func (n *namespacedTranslator) IsManaged(_ context2.Context, pObj client.Object) } func (n *namespacedTranslator) TranslateMetadata(ctx context2.Context, vObj client.Object) client.Object { - nameNamespace, err := n.Mapper.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj) - if err != nil { - return nil - } - - pObj, err := translate.Default.SetupMetadataWithName(vObj, nameNamespace) + pObj, err := translate.Default.SetupMetadataWithName(vObj, n.Mapper.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj)) if err != nil { return nil } diff --git a/pkg/mappings/generic/cluster.go b/pkg/mappings/generic/cluster.go deleted file mode 100644 index c2550f96f5..0000000000 --- a/pkg/mappings/generic/cluster.go +++ /dev/null @@ -1,86 +0,0 @@ -package generic - -import ( - context2 "context" - "fmt" - - "github.com/loft-sh/vcluster/pkg/constants" - synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" - "github.com/loft-sh/vcluster/pkg/mappings" - "github.com/loft-sh/vcluster/pkg/scheme" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" - "github.com/loft-sh/vcluster/pkg/util/translate" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -func NewClusterMapper(ctx *synccontext.RegisterContext, obj client.Object, nameTranslator translate.PhysicalNameClusterFunc, options ...MapperOption) (mappings.Mapper, error) { - gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) - if err != nil { - return nil, fmt.Errorf("retrieve GVK for object failed: %w", err) - } - - mapperOptions := getOptions(options...) - if !mapperOptions.SkipIndex { - err = ctx.VirtualManager.GetFieldIndexer().IndexField(ctx.Context, obj.DeepCopyObject().(client.Object), constants.IndexByPhysicalName, func(rawObj client.Object) []string { - return []string{nameTranslator(rawObj.GetName(), rawObj)} - }) - if err != nil { - return nil, fmt.Errorf("index field: %w", err) - } - } - - return &clusterMapper{ - obj: obj, - gvk: gvk, - nameTranslator: nameTranslator, - virtualClient: ctx.VirtualManager.GetClient(), - }, nil -} - -type clusterMapper struct { - gvk schema.GroupVersionKind - obj client.Object - nameTranslator translate.PhysicalNameClusterFunc - virtualClient client.Client -} - -func (n *clusterMapper) GroupVersionKind() schema.GroupVersionKind { - return n.gvk -} - -func (n *clusterMapper) VirtualToHost(_ context2.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { - return types.NamespacedName{ - Name: n.nameTranslator(req.Name, vObj), - } -} - -func (n *clusterMapper) HostToVirtual(ctx context2.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { - if pObj != nil { - pAnnotations := pObj.GetAnnotations() - if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { - return types.NamespacedName{ - Namespace: pAnnotations[translate.NamespaceAnnotation], - Name: pAnnotations[translate.NameAnnotation], - } - } - } - - vObj := n.obj.DeepCopyObject().(client.Object) - err := clienthelper.GetByIndex(ctx, n.virtualClient, vObj, constants.IndexByPhysicalName, req.Name) - 
if err != nil { - if !kerrors.IsNotFound(err) && !kerrors.IsConflict(err) { - panic(err.Error()) - } - - return types.NamespacedName{} - } - - return types.NamespacedName{ - Namespace: vObj.GetNamespace(), - Name: vObj.GetName(), - } -} diff --git a/pkg/mappings/generic/namespaced.go b/pkg/mappings/generic/mapper.go similarity index 53% rename from pkg/mappings/generic/namespaced.go rename to pkg/mappings/generic/mapper.go index c0e1f6f765..bdeb267cbd 100644 --- a/pkg/mappings/generic/namespaced.go +++ b/pkg/mappings/generic/mapper.go @@ -17,27 +17,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -type MapperOption func(options *MapperOptions) +// PhysicalNameWithObjectFunc is a definition to translate a name that also optionally expects a vObj +type PhysicalNameWithObjectFunc func(vName, vNamespace string, vObj client.Object) string -func SkipIndex() MapperOption { - return func(options *MapperOptions) { - options.SkipIndex = true - } -} +// PhysicalNameFunc is a definition to translate a name +type PhysicalNameFunc func(vName, vNamespace string) string -type MapperOptions struct { - SkipIndex bool -} - -func getOptions(options ...MapperOption) *MapperOptions { - newOptions := &MapperOptions{} - for _, option := range options { - option(newOptions) - } - return newOptions +// NewMapper creates a new mapper with a custom physical name func +func NewMapper(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameFunc, options ...MapperOption) (mappings.Mapper, error) { + return NewMapperWithObject(ctx, obj, func(vName, vNamespace string, _ client.Object) string { + return translateName(vName, vNamespace) + }, options...) 
} -func NewNamespacedMapper(ctx *synccontext.RegisterContext, obj client.Object, translateName translate.PhysicalNameFunc, options ...MapperOption) (mappings.Mapper, error) { +// NewMapperWithObject creates a new mapper with a custom physical name func +func NewMapperWithObject(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameWithObjectFunc, options ...MapperOption) (mappings.Mapper, error) { gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) if err != nil { return nil, fmt.Errorf("retrieve GVK for object failed: %w", err) @@ -46,14 +40,18 @@ func NewNamespacedMapper(ctx *synccontext.RegisterContext, obj client.Object, tr mapperOptions := getOptions(options...) if !mapperOptions.SkipIndex { err = ctx.VirtualManager.GetFieldIndexer().IndexField(ctx.Context, obj.DeepCopyObject().(client.Object), constants.IndexByPhysicalName, func(rawObj client.Object) []string { - return []string{translate.Default.PhysicalNamespace(rawObj.GetNamespace()) + "/" + translateName(rawObj.GetName(), rawObj.GetNamespace())} + if rawObj.GetNamespace() != "" { + return []string{translate.Default.PhysicalNamespace(rawObj.GetNamespace()) + "/" + translateName(rawObj.GetName(), rawObj.GetNamespace(), rawObj)} + } + + return []string{translateName(rawObj.GetName(), rawObj.GetNamespace(), rawObj)} }) if err != nil { return nil, fmt.Errorf("index field: %w", err) } } - return &namespacedMapper{ + return &mapper{ translateName: translateName, virtualClient: ctx.VirtualManager.GetClient(), obj: obj, @@ -61,25 +59,26 @@ func NewNamespacedMapper(ctx *synccontext.RegisterContext, obj client.Object, tr }, nil } -type namespacedMapper struct { - translateName translate.PhysicalNameFunc +type mapper struct { + translateName PhysicalNameWithObjectFunc virtualClient client.Client - obj client.Object - gvk schema.GroupVersionKind + + obj client.Object + gvk schema.GroupVersionKind } -func (n *namespacedMapper) GroupVersionKind() schema.GroupVersionKind { +func (n *mapper) 
GroupVersionKind() schema.GroupVersionKind { return n.gvk } -func (n *namespacedMapper) VirtualToHost(_ context2.Context, req types.NamespacedName, _ client.Object) types.NamespacedName { +func (n *mapper) VirtualToHost(_ context2.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { return types.NamespacedName{ Namespace: translate.Default.PhysicalNamespace(req.Namespace), - Name: n.translateName(req.Name, req.Namespace), + Name: n.translateName(req.Name, req.Namespace, vObj), } } -func (n *namespacedMapper) HostToVirtual(ctx context2.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { +func (n *mapper) HostToVirtual(ctx context2.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { if pObj != nil { pAnnotations := pObj.GetAnnotations() if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { @@ -90,8 +89,13 @@ func (n *namespacedMapper) HostToVirtual(ctx context2.Context, req types.Namespa } } + key := req.Name + if req.Namespace != "" { + key = req.Namespace + "/" + req.Name + } + vObj := n.obj.DeepCopyObject().(client.Object) - err := clienthelper.GetByIndex(ctx, n.virtualClient, vObj, constants.IndexByPhysicalName, req.Namespace+"/"+req.Name) + err := clienthelper.GetByIndex(ctx, n.virtualClient, vObj, constants.IndexByPhysicalName, key) if err != nil { if !kerrors.IsNotFound(err) && !kerrors.IsConflict(err) { panic(err.Error()) diff --git a/pkg/mappings/generic/mirror.go b/pkg/mappings/generic/mirror.go index 9583d79ebc..d9c1cbe350 100644 --- a/pkg/mappings/generic/mirror.go +++ b/pkg/mappings/generic/mirror.go @@ -6,35 +6,61 @@ import ( "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/scheme" + "github.com/loft-sh/vcluster/pkg/util/translate" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -func NewMirrorPhysicalMapper(obj 
client.Object) (mappings.Mapper, error) { +func NewMirrorMapper(obj client.Object) (mappings.Mapper, error) { gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) if err != nil { return nil, fmt.Errorf("retrieve GVK for object failed: %w", err) } - return &mirrorPhysicalMapper{ + return &mirrorMapper{ gvk: gvk, }, nil } -type mirrorPhysicalMapper struct { +type mirrorMapper struct { gvk schema.GroupVersionKind } -func (n *mirrorPhysicalMapper) GroupVersionKind() schema.GroupVersionKind { +func (n *mirrorMapper) GroupVersionKind() schema.GroupVersionKind { return n.gvk } -func (n *mirrorPhysicalMapper) VirtualToHost(_ context.Context, req types.NamespacedName, _ client.Object) types.NamespacedName { - return req +func (n *mirrorMapper) VirtualToHost(_ context.Context, req types.NamespacedName, _ client.Object) types.NamespacedName { + pNamespace := req.Namespace + if pNamespace != "" { + pNamespace = translate.Default.PhysicalNamespace(pNamespace) + } + + return types.NamespacedName{ + Namespace: pNamespace, + Name: req.Name, + } } -func (n *mirrorPhysicalMapper) HostToVirtual(_ context.Context, req types.NamespacedName, _ client.Object) types.NamespacedName { - return req +func (n *mirrorMapper) HostToVirtual(_ context.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { + if pObj != nil { + pAnnotations := pObj.GetAnnotations() + if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { + return types.NamespacedName{ + Namespace: pAnnotations[translate.NamespaceAnnotation], + Name: pAnnotations[translate.NameAnnotation], + } + } + } + + // if a namespace is requested we need to return early here + if req.Namespace != "" { + return types.NamespacedName{} + } + + return types.NamespacedName{ + Name: req.Name, + } } diff --git a/pkg/mappings/generic/options.go b/pkg/mappings/generic/options.go new file mode 100644 index 0000000000..7d8a30c375 --- /dev/null +++ b/pkg/mappings/generic/options.go @@ -0,0 +1,21 @@ +package generic 
+ +type MapperOptions struct { + SkipIndex bool +} + +type MapperOption func(options *MapperOptions) + +func SkipIndex() MapperOption { + return func(options *MapperOptions) { + options.SkipIndex = true + } +} + +func getOptions(options ...MapperOption) *MapperOptions { + newOptions := &MapperOptions{} + for _, option := range options { + option(newOptions) + } + return newOptions +} diff --git a/pkg/mappings/registermappings/register.go b/pkg/mappings/registermappings/register.go deleted file mode 100644 index 54a933f23d..0000000000 --- a/pkg/mappings/registermappings/register.go +++ /dev/null @@ -1,65 +0,0 @@ -package registermappings - -import ( - "fmt" - - synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" - "github.com/loft-sh/vcluster/pkg/mappings" - "github.com/loft-sh/vcluster/pkg/mappings/resources" -) - -type CreateMapper func(ctx *synccontext.RegisterContext) (mappings.Mapper, error) - -var DefaultResourceMappings = []CreateMapper{ - resources.CreateSecretsMapper, - resources.CreateConfigMapsMapper, - resources.CreateCSIDriversMapper, - resources.CreateCSINodesMapper, - resources.CreateCSIStorageCapacitiesMapper, - resources.CreateEndpointsMapper, - resources.CreateEventsMapper, - resources.CreateIngressClassesMapper, - resources.CreateIngressesMapper, - resources.CreateNamespacesMapper, - resources.CreateNetworkPoliciesMapper, - resources.CreateNodesMapper, - resources.CreatePersistentVolumeClaimsMapper, - resources.CreateServiceAccountsMapper, - resources.CreateServiceMapper, - resources.CreatePriorityClassesMapper, - resources.CreatePodDisruptionBudgetsMapper, - resources.CreatePersistentVolumesMapper, - resources.CreatePodsMapper, - resources.CreateStorageClassesMapper, - resources.CreateVolumeSnapshotClassesMapper, - resources.CreateVolumeSnapshotContentsMapper, - resources.CreateVolumeSnapshotsMapper, -} - -func MustRegisterMappings(ctx *synccontext.RegisterContext) { - err := RegisterMappings(ctx) - if err != nil { - 
panic(err.Error()) - } -} - -func RegisterMappings(ctx *synccontext.RegisterContext) error { - // create mappers - for _, createFunc := range DefaultResourceMappings { - if createFunc == nil { - continue - } - - mapper, err := createFunc(ctx) - if err != nil { - return fmt.Errorf("create mapper: %w", err) - } - - err = mappings.Default.AddMapper(mapper) - if err != nil { - return fmt.Errorf("add mapper %s: %w", mapper.GroupVersionKind().String(), err) - } - } - - return nil -} diff --git a/pkg/mappings/resources/configmaps.go b/pkg/mappings/resources/configmaps.go index 4838580356..fb5f39b974 100644 --- a/pkg/mappings/resources/configmaps.go +++ b/pkg/mappings/resources/configmaps.go @@ -14,7 +14,7 @@ import ( ) func CreateConfigMapsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - mapper, err := generic.NewNamespacedMapper(ctx, &corev1.ConfigMap{}, translate.Default.PhysicalName, generic.SkipIndex()) + mapper, err := generic.NewMapper(ctx, &corev1.ConfigMap{}, translate.Default.PhysicalName, generic.SkipIndex()) if err != nil { return nil, err } diff --git a/pkg/mappings/resources/csidrivers.go b/pkg/mappings/resources/csidrivers.go index 6b45370b45..d69fa9568a 100644 --- a/pkg/mappings/resources/csidrivers.go +++ b/pkg/mappings/resources/csidrivers.go @@ -8,5 +8,5 @@ import ( ) func CreateCSIDriversMapper(_ *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewMirrorPhysicalMapper(&storagev1.CSINode{}) + return generic.NewMirrorMapper(&storagev1.CSIDriver{}) } diff --git a/pkg/mappings/resources/csinodes.go b/pkg/mappings/resources/csinodes.go index d51131f2c6..843f93882d 100644 --- a/pkg/mappings/resources/csinodes.go +++ b/pkg/mappings/resources/csinodes.go @@ -8,5 +8,5 @@ import ( ) func CreateCSINodesMapper(_ *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewMirrorPhysicalMapper(&storagev1.CSIDriver{}) + return generic.NewMirrorMapper(&storagev1.CSINode{}) } diff --git
a/pkg/mappings/resources/csistoragecapacities.go b/pkg/mappings/resources/csistoragecapacities.go index 137d55f9d9..87b053496f 100644 --- a/pkg/mappings/resources/csistoragecapacities.go +++ b/pkg/mappings/resources/csistoragecapacities.go @@ -18,7 +18,6 @@ func CreateCSIStorageCapacitiesMapper(ctx *synccontext.RegisterContext) (mapping s := &csiStorageCapacitiesMapper{ physicalClient: ctx.PhysicalManager.GetClient(), } - err := ctx.PhysicalManager.GetFieldIndexer().IndexField(ctx.Context, &storagev1.CSIStorageCapacity{}, constants.IndexByVirtualName, func(rawObj client.Object) []string { return []string{s.HostToVirtual(ctx.Context, types.NamespacedName{Name: rawObj.GetName(), Namespace: rawObj.GetNamespace()}, rawObj).Name} }) diff --git a/pkg/mappings/resources/endpoints.go b/pkg/mappings/resources/endpoints.go index 4eb4610bfa..7c1a7fdab6 100644 --- a/pkg/mappings/resources/endpoints.go +++ b/pkg/mappings/resources/endpoints.go @@ -9,5 +9,5 @@ import ( ) func CreateEndpointsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.Endpoints{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.Endpoints{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/ingressclasses.go b/pkg/mappings/resources/ingressclasses.go index 8baac5d3e2..38af88731e 100644 --- a/pkg/mappings/resources/ingressclasses.go +++ b/pkg/mappings/resources/ingressclasses.go @@ -8,5 +8,5 @@ import ( ) func CreateIngressClassesMapper(_ *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewMirrorPhysicalMapper(&networkingv1.IngressClass{}) + return generic.NewMirrorMapper(&networkingv1.IngressClass{}) } diff --git a/pkg/mappings/resources/ingresses.go b/pkg/mappings/resources/ingresses.go index f19ddb3c4d..5bd9def736 100644 --- a/pkg/mappings/resources/ingresses.go +++ b/pkg/mappings/resources/ingresses.go @@ -9,5 +9,5 @@ import ( ) func CreateIngressesMapper(ctx 
*synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &networkingv1.Ingress{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &networkingv1.Ingress{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/namespaces.go b/pkg/mappings/resources/namespaces.go index 66f0aa650e..01dc932c66 100644 --- a/pkg/mappings/resources/namespaces.go +++ b/pkg/mappings/resources/namespaces.go @@ -6,11 +6,10 @@ import ( "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateNamespacesMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewClusterMapper(ctx, &corev1.Namespace{}, func(vName string, _ client.Object) string { + return generic.NewMapper(ctx, &corev1.Namespace{}, func(vName, _ string) string { return translate.Default.PhysicalNamespace(vName) }) } diff --git a/pkg/mappings/resources/networkpolicies.go b/pkg/mappings/resources/networkpolicies.go index 801c73de7e..8693619e94 100644 --- a/pkg/mappings/resources/networkpolicies.go +++ b/pkg/mappings/resources/networkpolicies.go @@ -9,5 +9,5 @@ import ( ) func CreateNetworkPoliciesMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &networkingv1.NetworkPolicy{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &networkingv1.NetworkPolicy{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/nodes.go b/pkg/mappings/resources/nodes.go index 5cffb222f5..906d56e331 100644 --- a/pkg/mappings/resources/nodes.go +++ b/pkg/mappings/resources/nodes.go @@ -8,5 +8,5 @@ import ( ) func CreateNodesMapper(_ *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewMirrorPhysicalMapper(&corev1.Node{}) + return generic.NewMirrorMapper(&corev1.Node{}) } diff --git 
a/pkg/mappings/resources/persistentvolumeclaims.go b/pkg/mappings/resources/persistentvolumeclaims.go index baa9b5be1b..1946a177d5 100644 --- a/pkg/mappings/resources/persistentvolumeclaims.go +++ b/pkg/mappings/resources/persistentvolumeclaims.go @@ -9,5 +9,5 @@ import ( ) func CreatePersistentVolumeClaimsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.PersistentVolumeClaim{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.PersistentVolumeClaim{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/persistentvolumes.go b/pkg/mappings/resources/persistentvolumes.go index 3d5fd571e3..a86ea9f216 100644 --- a/pkg/mappings/resources/persistentvolumes.go +++ b/pkg/mappings/resources/persistentvolumes.go @@ -1,75 +1,26 @@ package resources import ( - "context" - "github.com/loft-sh/vcluster/pkg/constants" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) func CreatePersistentVolumesMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - mapper, err := generic.NewClusterMapper(ctx, &corev1.PersistentVolume{}, translatePersistentVolumeName) - if err != nil { - return nil, err - } - - return &persistentVolumeMapper{ - Mapper: mapper, - - virtualClient: ctx.VirtualManager.GetClient(), - }, nil -} - -type persistentVolumeMapper struct { - mappings.Mapper - - virtualClient client.Client -} - -func (s *persistentVolumeMapper) VirtualToHost(_ context.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { - return types.NamespacedName{Name: 
translatePersistentVolumeName(req.Name, vObj)} -} - -func (s *persistentVolumeMapper) HostToVirtual(ctx context.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { - if pObj != nil { - pAnnotations := pObj.GetAnnotations() - if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { - return types.NamespacedName{ - Name: pAnnotations[translate.NameAnnotation], - } + return generic.NewMapperWithObject(ctx, &corev1.PersistentVolume{}, func(name, _ string, vObj client.Object) string { + if vObj == nil { + return name } - } - vObj := &corev1.PersistentVolume{} - err := clienthelper.GetByIndex(ctx, s.virtualClient, vObj, constants.IndexByPhysicalName, req.Name) - if err != nil { - if !kerrors.IsNotFound(err) { - return types.NamespacedName{} + vPv, ok := vObj.(*corev1.PersistentVolume) + if !ok || vPv.Annotations == nil || vPv.Annotations[constants.HostClusterPersistentVolumeAnnotation] == "" { + return translate.Default.PhysicalNameClusterScoped(name) } - return types.NamespacedName{Name: req.Name} - } - - return types.NamespacedName{Name: vObj.GetName()} -} - -func translatePersistentVolumeName(name string, vObj client.Object) string { - if vObj == nil { - return name - } - - vPv, ok := vObj.(*corev1.PersistentVolume) - if !ok || vPv.Annotations == nil || vPv.Annotations[constants.HostClusterPersistentVolumeAnnotation] == "" { - return translate.Default.PhysicalNameClusterScoped(name) - } - - return vPv.Annotations[constants.HostClusterPersistentVolumeAnnotation] + return vPv.Annotations[constants.HostClusterPersistentVolumeAnnotation] + }) } diff --git a/pkg/mappings/resources/poddisruptionbudgets.go b/pkg/mappings/resources/poddisruptionbudgets.go index b25a77b345..d9dc889bc1 100644 --- a/pkg/mappings/resources/poddisruptionbudgets.go +++ b/pkg/mappings/resources/poddisruptionbudgets.go @@ -9,5 +9,5 @@ import ( ) func CreatePodDisruptionBudgetsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return 
generic.NewNamespacedMapper(ctx, &policyv1.PodDisruptionBudget{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &policyv1.PodDisruptionBudget{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/pods.go b/pkg/mappings/resources/pods.go index 61d4d72c2f..357656c5cd 100644 --- a/pkg/mappings/resources/pods.go +++ b/pkg/mappings/resources/pods.go @@ -9,5 +9,5 @@ import ( ) func CreatePodsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.Pod{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.Pod{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/priorityclasses.go b/pkg/mappings/resources/priorityclasses.go index 9fd4384e47..0c43660f9d 100644 --- a/pkg/mappings/resources/priorityclasses.go +++ b/pkg/mappings/resources/priorityclasses.go @@ -6,15 +6,14 @@ import ( "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/util/translate" schedulingv1 "k8s.io/api/scheduling/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) func CreatePriorityClassesMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { if !ctx.Config.Sync.ToHost.PriorityClasses.Enabled { - return generic.NewMirrorPhysicalMapper(&schedulingv1.PriorityClass{}) + return generic.NewMirrorMapper(&schedulingv1.PriorityClass{}) } - return generic.NewClusterMapper(ctx, &schedulingv1.PriorityClass{}, func(vName string, _ client.Object) string { + return generic.NewMapper(ctx, &schedulingv1.PriorityClass{}, func(vName, _ string) string { // we have to prefix with vCluster as system is reserved return translate.Default.PhysicalNameClusterScoped(vName) }) diff --git a/pkg/mappings/resources/register.go b/pkg/mappings/resources/register.go new file mode 100644 index 0000000000..ba3ec253dd --- /dev/null +++ b/pkg/mappings/resources/register.go @@ -0,0 +1,80 @@ +package resources + +import ( + "fmt" + + synccontext 
"github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + "github.com/loft-sh/vcluster/pkg/mappings" +) + +// ExtraMappers that will be started as well +var ExtraMappers []BuildMapper + +// BuildMapper is a function to build a new mapper +type BuildMapper func(ctx *synccontext.RegisterContext) (mappings.Mapper, error) + +func getMappers(ctx *synccontext.RegisterContext) []BuildMapper { + return append([]BuildMapper{ + CreateSecretsMapper, + CreateConfigMapsMapper, + isEnabled(ctx.Config.Sync.FromHost.CSINodes.Enabled == "true", CreateCSINodesMapper), + isEnabled(ctx.Config.Sync.FromHost.CSIDrivers.Enabled == "true", CreateCSIDriversMapper), + isEnabled(ctx.Config.Sync.FromHost.CSIStorageCapacities.Enabled == "true", CreateCSIStorageCapacitiesMapper), + CreateEndpointsMapper, + CreateEventsMapper, + CreateIngressClassesMapper, + CreateIngressesMapper, + CreateNamespacesMapper, + CreateNetworkPoliciesMapper, + CreateNodesMapper, + CreatePersistentVolumeClaimsMapper, + CreateServiceAccountsMapper, + CreateServiceMapper, + CreatePriorityClassesMapper, + CreatePodDisruptionBudgetsMapper, + CreatePersistentVolumesMapper, + CreatePodsMapper, + CreateStorageClassesMapper, + CreateVolumeSnapshotClassesMapper, + CreateVolumeSnapshotContentsMapper, + CreateVolumeSnapshotsMapper, + }, ExtraMappers...) 
+} + +func MustRegisterMappings(ctx *synccontext.RegisterContext) { + err := RegisterMappings(ctx) + if err != nil { + panic(err.Error()) + } +} + +func RegisterMappings(ctx *synccontext.RegisterContext) error { + // create mappers + for _, createFunc := range getMappers(ctx) { + if createFunc == nil { + continue + } + + mapper, err := createFunc(ctx) + if err != nil { + return fmt.Errorf("create mapper: %w", err) + } else if mapper == nil { + continue + } + + err = mappings.Default.AddMapper(mapper) + if err != nil { + return fmt.Errorf("add mapper %s: %w", mapper.GroupVersionKind().String(), err) + } + } + + return nil +} + +func isEnabled[T any](enabled bool, fn T) T { + if enabled { + return fn + } + var ret T + return ret +} diff --git a/pkg/mappings/resources/secrets.go b/pkg/mappings/resources/secrets.go index 6ed721aad9..e262d4aea0 100644 --- a/pkg/mappings/resources/secrets.go +++ b/pkg/mappings/resources/secrets.go @@ -9,5 +9,5 @@ import ( ) func CreateSecretsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.Secret{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.Secret{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/serviceaccounts.go b/pkg/mappings/resources/serviceaccounts.go index c513d615fa..c9b7e6b687 100644 --- a/pkg/mappings/resources/serviceaccounts.go +++ b/pkg/mappings/resources/serviceaccounts.go @@ -9,5 +9,5 @@ import ( ) func CreateServiceAccountsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.ServiceAccount{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.ServiceAccount{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/services.go b/pkg/mappings/resources/services.go index 04ee89c2a9..e762cd35fe 100644 --- a/pkg/mappings/resources/services.go +++ b/pkg/mappings/resources/services.go @@ -9,5 +9,5 @@ import ( 
) func CreateServiceMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewNamespacedMapper(ctx, &corev1.Service{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &corev1.Service{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/resources/storageclasses.go b/pkg/mappings/resources/storageclasses.go index 706af13393..1d08734bb1 100644 --- a/pkg/mappings/resources/storageclasses.go +++ b/pkg/mappings/resources/storageclasses.go @@ -6,15 +6,14 @@ import ( "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/util/translate" storagev1 "k8s.io/api/storage/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateStorageClassesMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { if !ctx.Config.Sync.ToHost.StorageClasses.Enabled { - return generic.NewMirrorPhysicalMapper(&storagev1.StorageClass{}) + return generic.NewMirrorMapper(&storagev1.StorageClass{}) } - return generic.NewClusterMapper(ctx, &storagev1.StorageClass{}, func(name string, _ client.Object) string { + return generic.NewMapper(ctx, &storagev1.StorageClass{}, func(name, _ string) string { return translate.Default.PhysicalNameClusterScoped(name) }) } diff --git a/pkg/mappings/resources/volumesnapshotclasses.go b/pkg/mappings/resources/volumesnapshotclasses.go index 0f49a5d9bb..93d149b86c 100644 --- a/pkg/mappings/resources/volumesnapshotclasses.go +++ b/pkg/mappings/resources/volumesnapshotclasses.go @@ -8,5 +8,5 @@ import ( ) func CreateVolumeSnapshotClassesMapper(_ *synccontext.RegisterContext) (mappings.Mapper, error) { - return generic.NewMirrorPhysicalMapper(&volumesnapshotv1.VolumeSnapshotClass{}) + return generic.NewMirrorMapper(&volumesnapshotv1.VolumeSnapshotClass{}) } diff --git a/pkg/mappings/resources/volumesnapshotcontents.go b/pkg/mappings/resources/volumesnapshotcontents.go index 1c73f370b0..f3529bb7a2 100644 --- a/pkg/mappings/resources/volumesnapshotcontents.go +++ 
b/pkg/mappings/resources/volumesnapshotcontents.go @@ -1,79 +1,30 @@ package resources import ( - "context" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/loft-sh/vcluster/pkg/constants" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" "github.com/loft-sh/vcluster/pkg/util/translate" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateVolumeSnapshotContentsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { if !ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled { - return generic.NewMirrorPhysicalMapper(&volumesnapshotv1.VolumeSnapshotContent{}) - } - - mapper, err := generic.NewClusterMapper(ctx, &volumesnapshotv1.VolumeSnapshotContent{}, translateVolumeSnapshotContentName) - if err != nil { - return nil, err + return generic.NewMirrorMapper(&volumesnapshotv1.VolumeSnapshotContent{}) } - return &volumeSnapshotContentMapper{ - Mapper: mapper, - - virtualClient: ctx.VirtualManager.GetClient(), - }, nil -} - -type volumeSnapshotContentMapper struct { - mappings.Mapper - - virtualClient client.Client -} - -func (s *volumeSnapshotContentMapper) VirtualToHost(_ context.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { - return types.NamespacedName{Name: translateVolumeSnapshotContentName(req.Name, vObj)} -} - -func (s *volumeSnapshotContentMapper) HostToVirtual(ctx context.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { - if pObj != nil { - pAnnotations := pObj.GetAnnotations() - if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { - return types.NamespacedName{ - Name: pAnnotations[translate.NameAnnotation], - } + return generic.NewMapperWithObject(ctx, 
&volumesnapshotv1.VolumeSnapshotContent{}, func(name, _ string, vObj client.Object) string { + if vObj == nil { + return name } - } - vObj := &volumesnapshotv1.VolumeSnapshotContent{} - err := clienthelper.GetByIndex(ctx, s.virtualClient, vObj, constants.IndexByPhysicalName, req.Name) - if err != nil { - if !kerrors.IsNotFound(err) { - return types.NamespacedName{} + vVSC, ok := vObj.(*volumesnapshotv1.VolumeSnapshotContent) + if !ok || vVSC.Annotations == nil || vVSC.Annotations[constants.HostClusterVSCAnnotation] == "" { + return translate.Default.PhysicalNameClusterScoped(name) } - return types.NamespacedName{Name: req.Name} - } - - return types.NamespacedName{Name: vObj.GetName()} -} - -func translateVolumeSnapshotContentName(name string, vObj client.Object) string { - if vObj == nil { - return name - } - - vVSC, ok := vObj.(*volumesnapshotv1.VolumeSnapshotContent) - if !ok || vVSC.Annotations == nil || vVSC.Annotations[constants.HostClusterVSCAnnotation] == "" { - return translate.Default.PhysicalNameClusterScoped(name) - } - - return vVSC.Annotations[constants.HostClusterVSCAnnotation] + return vVSC.Annotations[constants.HostClusterVSCAnnotation] + }) } diff --git a/pkg/mappings/resources/volumesnapshots.go b/pkg/mappings/resources/volumesnapshots.go index 46f383e965..33c845f977 100644 --- a/pkg/mappings/resources/volumesnapshots.go +++ b/pkg/mappings/resources/volumesnapshots.go @@ -10,8 +10,8 @@ import ( func CreateVolumeSnapshotsMapper(ctx *synccontext.RegisterContext) (mappings.Mapper, error) { if !ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled { - return generic.NewMirrorPhysicalMapper(&volumesnapshotv1.VolumeSnapshot{}) + return generic.NewMirrorMapper(&volumesnapshotv1.VolumeSnapshot{}) } - return generic.NewNamespacedMapper(ctx, &volumesnapshotv1.VolumeSnapshot{}, translate.Default.PhysicalName) + return generic.NewMapper(ctx, &volumesnapshotv1.VolumeSnapshot{}, translate.Default.PhysicalName) } diff --git a/pkg/mappings/mappings.go 
b/pkg/mappings/types.go similarity index 100% rename from pkg/mappings/mappings.go rename to pkg/mappings/types.go diff --git a/pkg/server/filters/service.go b/pkg/server/filters/service.go index 54ac5b4ef9..a65fd97121 100644 --- a/pkg/server/filters/service.go +++ b/pkg/server/filters/service.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/request" @@ -208,7 +209,7 @@ func createService(req *http.Request, decoder encoding.Decoder, localClient clie vService.Name = vService.GenerateName + random.String(5) } - newService := translate.Default.ApplyMetadata(vService, mappings.Services().VirtualToHost(req.Context(), mappings.NamespacedName(vService), vService), syncedLabels).(*corev1.Service) + newService := translate.Default.ApplyMetadata(vService, mappings.Services().VirtualToHost(req.Context(), types.NamespacedName{Name: vService.Name, Namespace: vService.Namespace}, vService), syncedLabels).(*corev1.Service) if newService.Annotations == nil { newService.Annotations = map[string]string{} } diff --git a/pkg/server/indicies.go b/pkg/server/indicies.go index de0084a2c5..c4ca17c53d 100644 --- a/pkg/server/indicies.go +++ b/pkg/server/indicies.go @@ -1,20 +1,20 @@ package server import ( - "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes/nodeservice" + synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) // RegisterIndices adds the server indices to the managers -func RegisterIndices(ctx 
*config.ControllerContext) error { +func RegisterIndices(ctx *synccontext.RegisterContext) error { // index services by ip if ctx.Config.Networking.Advanced.ProxyKubelets.ByIP { - err := ctx.LocalManager.GetFieldIndexer().IndexField(ctx.Context, &corev1.Service{}, constants.IndexByClusterIP, func(object client.Object) []string { + err := ctx.PhysicalManager.GetFieldIndexer().IndexField(ctx.Context, &corev1.Service{}, constants.IndexByClusterIP, func(object client.Object) []string { svc := object.(*corev1.Service) if len(svc.Labels) == 0 || svc.Labels[nodeservice.ServiceClusterLabel] != translate.VClusterName { return nil diff --git a/pkg/server/server.go b/pkg/server/server.go index baa640a8b4..092d7cafb1 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -20,13 +20,10 @@ import ( "github.com/loft-sh/vcluster/pkg/server/filters" "github.com/loft-sh/vcluster/pkg/server/handler" servertypes "github.com/loft-sh/vcluster/pkg/server/types" - "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" "github.com/loft-sh/vcluster/pkg/util/pluginhookclient" "github.com/loft-sh/vcluster/pkg/util/serverhelper" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" @@ -51,7 +48,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -247,54 +243,6 @@ func (s *Server) ServeOnListenerTLS(address string, port int, stopChan <-chan st return nil } -func createCachedClient(ctx context.Context, config *rest.Config, namespace string, restMapper meta.RESTMapper, scheme *runtime.Scheme, registerIndices func(cache cache.Cache) error) (client.Client, error) { - // create cache options - cacheOptions := cache.Options{ - Scheme: scheme, - Mapper: 
restMapper, - } - if namespace != "" { - cacheOptions.DefaultNamespaces = map[string]cache.Config{namespace: {}} - } - - // create the new cache - clientCache, err := cache.New(config, cacheOptions) - if err != nil { - return nil, err - } - - // register indices - if registerIndices != nil { - err = registerIndices(clientCache) - if err != nil { - return nil, err - } - } - - // start cache - go func() { - err := clientCache.Start(ctx) - if err != nil { - panic(err) - } - }() - clientCache.WaitForCacheSync(ctx) - - // create a client from cache - cachedVirtualClient, err := blockingcacheclient.NewCacheClient(config, client.Options{ - Scheme: scheme, - Mapper: restMapper, - Cache: &client.CacheOptions{ - Reader: clientCache, - }, - }) - if err != nil { - return nil, err - } - - return cachedVirtualClient, nil -} - func (s *Server) buildHandlerChain(serverConfig *server.Config) http.Handler { defaultHandler := DefaultBuildHandlerChain(s.handler, serverConfig) defaultHandler = filters.WithNodeName(defaultHandler, s.currentNamespace, s.fakeKubeletIPs, s.cachedVirtualClient, s.currentNamespaceClient) diff --git a/pkg/setup/managers.go b/pkg/setup/managers.go index 206ab67dea..ea21faeceb 100644 --- a/pkg/setup/managers.go +++ b/pkg/setup/managers.go @@ -3,49 +3,36 @@ package setup import ( "fmt" - "github.com/loft-sh/vcluster/pkg/config" - "github.com/loft-sh/vcluster/pkg/controllers" - "github.com/loft-sh/vcluster/pkg/mappings/registermappings" + syncerresources "github.com/loft-sh/vcluster/pkg/controllers/resources" + synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" + mapperresources "github.com/loft-sh/vcluster/pkg/mappings/resources" "github.com/loft-sh/vcluster/pkg/server" syncertypes "github.com/loft-sh/vcluster/pkg/types" - util "github.com/loft-sh/vcluster/pkg/util/context" "k8s.io/klog/v2" ) -func StartManagers(controllerContext *config.ControllerContext) ([]syncertypes.Object, error) { - // register resource mappings - err := 
registermappings.RegisterMappings(util.ToRegisterContext(controllerContext)) - if err != nil { - return nil, fmt.Errorf("register resource mappings: %w", err) - } - +func StartManagers(ctx *synccontext.RegisterContext) ([]syncertypes.Object, error) { // index fields for server - err = server.RegisterIndices(controllerContext) + err := server.RegisterIndices(ctx) if err != nil { return nil, fmt.Errorf("register server indices: %w", err) } - // init syncers - syncers, err := controllers.CreateSyncers(controllerContext) - if err != nil { - return nil, fmt.Errorf("create syncers: %w", err) - } - - // execute controller initializers to setup prereqs, etc. - err = controllers.ExecuteInitializers(controllerContext, syncers) + // register resource mappings + err = mapperresources.RegisterMappings(ctx) if err != nil { - return nil, fmt.Errorf("execute initializers: %w", err) + return nil, fmt.Errorf("register resource mappings: %w", err) } - // register indices - err = controllers.RegisterIndices(controllerContext, syncers) + // init syncers before starting the managers as they might need to register indices + syncers, err := syncerresources.BuildSyncers(ctx) if err != nil { - return nil, fmt.Errorf("register indices: %w", err) + return nil, fmt.Errorf("create syncers: %w", err) } // start the local manager go func() { - err := controllerContext.LocalManager.Start(controllerContext.Context) + err := ctx.PhysicalManager.Start(ctx.Context) if err != nil { panic(err) } @@ -53,7 +40,7 @@ func StartManagers(controllerContext *config.ControllerContext) ([]syncertypes.O // start the virtual cluster manager go func() { - err := controllerContext.VirtualManager.Start(controllerContext.Context) + err := ctx.VirtualManager.Start(ctx.Context) if err != nil { panic(err) } @@ -61,8 +48,8 @@ func StartManagers(controllerContext *config.ControllerContext) ([]syncertypes.O // Wait for caches to be synced klog.Infof("Starting local & virtual managers...") - 
controllerContext.LocalManager.GetCache().WaitForCacheSync(controllerContext.Context) - controllerContext.VirtualManager.GetCache().WaitForCacheSync(controllerContext.Context) + ctx.PhysicalManager.GetCache().WaitForCacheSync(ctx.Context) + ctx.VirtualManager.GetCache().WaitForCacheSync(ctx.Context) klog.Infof("Successfully started local & virtual manager") return syncers, nil diff --git a/pkg/util/translate/multi_namespace.go b/pkg/util/translate/multi_namespace.go index 9e3c96f3bc..478b4c661e 100644 --- a/pkg/util/translate/multi_namespace.go +++ b/pkg/util/translate/multi_namespace.go @@ -44,19 +44,6 @@ func (s *multiNamespace) PhysicalNameShort(name, _ string) string { return name } -func (s *multiNamespace) objectPhysicalName(obj runtime.Object) string { - if obj == nil { - return "" - } - - metaAccessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - - return s.PhysicalName(metaAccessor.GetName(), metaAccessor.GetNamespace()) -} - func (s *multiNamespace) PhysicalNameClusterScoped(name string) string { if name == "" { return "" diff --git a/pkg/util/translate/single_namespace.go b/pkg/util/translate/single_namespace.go index cc60aa10b3..949bbab430 100644 --- a/pkg/util/translate/single_namespace.go +++ b/pkg/util/translate/single_namespace.go @@ -59,19 +59,6 @@ func SingleNamespacePhysicalName(name, namespace, suffix string) string { return SafeConcatName(name, "x", namespace, "x", suffix) } -func (s *singleNamespace) objectPhysicalName(obj runtime.Object) string { - if obj == nil { - return "" - } - - metaAccessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - - return s.PhysicalName(metaAccessor.GetName(), metaAccessor.GetNamespace()) -} - func (s *singleNamespace) PhysicalNameClusterScoped(name string) string { if name == "" { return "" @@ -93,19 +80,19 @@ func (s *singleNamespace) IsManaged(obj runtime.Object) bool { // If object-name annotation is not set OR // If object-name annotation is different from actual name gvk, 
err := apiutil.GVKForObject(obj, scheme.Scheme) - if err == nil && mappings.Has(gvk) { - if metaAccessor.GetAnnotations()[NameAnnotation] == "" || metaAccessor.GetName() != mappings.VirtualToHostName(metaAccessor.GetAnnotations()[NameAnnotation], metaAccessor.GetAnnotations()[NamespaceAnnotation], mappings.ByGVK(gvk)) { + if err == nil { + // check if the name annotation is correct + if metaAccessor.GetAnnotations()[NameAnnotation] == "" || + (mappings.Has(gvk) && metaAccessor.GetName() != mappings.VirtualToHostName(metaAccessor.GetAnnotations()[NameAnnotation], metaAccessor.GetAnnotations()[NamespaceAnnotation], mappings.ByGVK(gvk))) { klog.FromContext(context.TODO()).V(1).Info("Host object doesn't match, because name annotations is wrong", "object", metaAccessor.GetName(), "existingName", metaAccessor.GetName(), "expectedName", mappings.VirtualToHostName(metaAccessor.GetAnnotations()[NameAnnotation], metaAccessor.GetAnnotations()[NamespaceAnnotation], mappings.ByGVK(gvk))) return false } - } - // if kind doesn't match vCluster has probably not synced the object - if metaAccessor.GetAnnotations()[KindAnnotation] != "" { - if err == nil && gvk.String() != metaAccessor.GetAnnotations()[KindAnnotation] { + // if kind doesn't match vCluster has probably not synced the object + if metaAccessor.GetAnnotations()[KindAnnotation] != "" && gvk.String() != metaAccessor.GetAnnotations()[KindAnnotation] { return false } } diff --git a/pkg/util/translate/types.go b/pkg/util/translate/types.go index 0b4e014be1..f8a4755054 100644 --- a/pkg/util/translate/types.go +++ b/pkg/util/translate/types.go @@ -16,12 +16,6 @@ var ( var Default Translator = &singleNamespace{} -// PhysicalNameFunc is a definition to translate a name -type PhysicalNameFunc func(vName, vNamespace string) string - -// PhysicalNameClusterFunc is a definition to translate a cluster name -type PhysicalNameClusterFunc func(vName string, vObj client.Object) string - type Translator interface { // 
SingleNamespaceTarget signals if we sync all objects into a single namespace SingleNamespaceTarget() bool @@ -81,6 +75,3 @@ type Translator interface { ConvertLabelKey(string) string } - -// PhysicalNamespacedNameTranslator transforms a virtual cluster name to a physical name -type PhysicalNamespacedNameTranslator func(vNN types.NamespacedName, vObj client.Object) string diff --git a/test/e2e/coredns/coredns.go b/test/e2e/coredns/coredns.go index 3cfa267964..425a01c29a 100644 --- a/test/e2e/coredns/coredns.go +++ b/test/e2e/coredns/coredns.go @@ -26,7 +26,7 @@ var _ = ginkgo.Describe("CoreDNS resolves host names correctly", func() { ns = fmt.Sprintf("e2e-coredns-%d-%s", iteration, random.String(5)) // create test namespace - _, err := f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err := f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) framework.ExpectNoError(err) curlPod, err = f.CreateCurlPod(ns) @@ -54,7 +54,7 @@ var _ = ginkgo.Describe("CoreDNS resolves host names correctly", func() { }) ginkgo.It("Test nodes (fake) kubelet is reachable via node hostname", func() { - nodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + nodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) for _, node := range nodes.Items { hostname := node.Name @@ -68,7 +68,7 @@ var _ = ginkgo.Describe("CoreDNS resolves host names correctly", func() { // sleep to reduce the rate of pod/exec calls url := fmt.Sprintf("https://%s:%d/healthz", hostname, node.Status.DaemonEndpoints.KubeletEndpoint.Port) cmd := []string{"curl", "-k", "-s", "--show-error", url} - stdoutBuffer, stderrBuffer, err := podhelper.ExecBuffered(f.Context, f.VclusterConfig, ns, curlPod.GetName(), curlPod.Spec.Containers[0].Name, cmd, nil) + 
stdoutBuffer, stderrBuffer, err := podhelper.ExecBuffered(f.Context, f.VClusterConfig, ns, curlPod.GetName(), curlPod.Spec.Containers[0].Name, cmd, nil) framework.ExpectNoError(err) framework.ExpectEmpty(stderrBuffer) framework.ExpectEqual(string(stdoutBuffer), "ok") diff --git a/test/e2e/k8sdefaultendpoint/k8sdefaultendpoint.go b/test/e2e/k8sdefaultendpoint/k8sdefaultendpoint.go index 25b8a801d3..ccf2bee90c 100644 --- a/test/e2e/k8sdefaultendpoint/k8sdefaultendpoint.go +++ b/test/e2e/k8sdefaultendpoint/k8sdefaultendpoint.go @@ -32,7 +32,7 @@ var _ = ginkgo.Describe("map default/kubernetes endpoint to physical vcluster en return false, err } - vclusterEndpoint, err := f.VclusterClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", v1.GetOptions{}) + vclusterEndpoint, err := f.VClusterClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", v1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/manifests/chart.go b/test/e2e/manifests/chart.go index f34fc599c4..5f57b28443 100644 --- a/test/e2e/manifests/chart.go +++ b/test/e2e/manifests/chart.go @@ -38,7 +38,7 @@ var _ = ginkgo.Describe("Helm charts (regular and OCI) are synced and applied as ginkgo.It("Test if configmap for both charts gets applied", func() { err := wait.PollUntilContextTimeout(f.Context, time.Millisecond*500, framework.PollTimeout*2, true, func(ctx context.Context) (bool, error) { - cm, err := f.VclusterClient.CoreV1().ConfigMaps(deploy.VClusterDeployConfigMapNamespace). + cm, err := f.VClusterClient.CoreV1().ConfigMaps(deploy.VClusterDeployConfigMapNamespace). 
Get(ctx, deploy.VClusterDeployConfigMap, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { @@ -64,7 +64,7 @@ var _ = ginkgo.Describe("Helm charts (regular and OCI) are synced and applied as ginkgo.It("Test nginx release secret existence in vcluster (regular chart)", func() { err := wait.PollUntilContextTimeout(f.Context, time.Millisecond*500, framework.PollTimeout, true, func(ctx context.Context) (bool, error) { - secList, err := f.VclusterClient.CoreV1().Secrets(ChartNamespace).List(ctx, metav1.ListOptions{ + secList, err := f.VClusterClient.CoreV1().Secrets(ChartNamespace).List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(HelmSecretLabels).String(), }) if err != nil { @@ -89,7 +89,7 @@ var _ = ginkgo.Describe("Helm charts (regular and OCI) are synced and applied as ginkgo.It("Test fluent-bit release deployment existence in vcluster (OCI chart)", func() { err := wait.PollUntilContextTimeout(f.Context, time.Millisecond*500, framework.PollTimeout, true, func(ctx context.Context) (bool, error) { - deployList, err := f.VclusterClient.AppsV1().Deployments(ChartOCINamespace).List(ctx, metav1.ListOptions{ + deployList, err := f.VClusterClient.AppsV1().Deployments(ChartOCINamespace).List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(HelmOCIDeploymentLabels).String(), }) if err != nil { diff --git a/test/e2e/manifests/init.go b/test/e2e/manifests/init.go index a135c633cd..61e4009607 100644 --- a/test/e2e/manifests/init.go +++ b/test/e2e/manifests/init.go @@ -19,7 +19,7 @@ var _ = ginkgo.Describe("Init manifests are synced and applied as expected", fun err := f.WaitForInitManifestConfigMapCreation(TestManifestName, TestManifestNamespace) framework.ExpectNoError(err) - manifest, err := f.VclusterClient.CoreV1().ConfigMaps(TestManifestNamespace).Get(f.Context, TestManifestName, metav1.GetOptions{}) + manifest, err := f.VClusterClient.CoreV1().ConfigMaps(TestManifestNamespace).Get(f.Context, TestManifestName, 
metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectHaveKey(manifest.Data, "foo", "modified init manifest is supposed to have the foo key") framework.ExpectEqual(manifest.Data["foo"], "bar") @@ -29,7 +29,7 @@ var _ = ginkgo.Describe("Init manifests are synced and applied as expected", fun err := f.WaitForInitManifestConfigMapCreation(TestManifestName2, TestManifestNamespace) framework.ExpectNoError(err) - manifest, err := f.VclusterClient.CoreV1().ConfigMaps(TestManifestNamespace).Get(f.Context, TestManifestName2, metav1.GetOptions{}) + manifest, err := f.VClusterClient.CoreV1().ConfigMaps(TestManifestNamespace).Get(f.Context, TestManifestName2, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectHaveKey(manifest.Data, "foo", "modified init manifest is supposed to have the foo key") framework.ExpectEqual(manifest.Data["foo"], "vcluster") diff --git a/test/e2e/node/node.go b/test/e2e/node/node.go index 716f9dd849..0324046cab 100644 --- a/test/e2e/node/node.go +++ b/test/e2e/node/node.go @@ -14,7 +14,7 @@ var _ = ginkgo.Describe("Node sync", func() { hostNodes, err := f.HostClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) - virtualNodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) hostname := "kind-control-plane" diff --git a/test/e2e/servicesync/servicesync.go b/test/e2e/servicesync/servicesync.go index 877f0979b0..902d282a10 100644 --- a/test/e2e/servicesync/servicesync.go +++ b/test/e2e/servicesync/servicesync.go @@ -33,25 +33,25 @@ var _ = ginkgo.Describe("map services from host to virtual cluster and vice vers framework.ExpectError(err) // make sure virtual service doesn't exist initially - _, err = f.VclusterClient.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{}) + _, err = 
f.VClusterClient.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{}) framework.ExpectError(err) - _, err = f.VclusterClient.CoreV1().Services("test").Get(ctx, "test", metav1.GetOptions{}) + _, err = f.VClusterClient.CoreV1().Services("test").Get(ctx, "test", metav1.GetOptions{}) framework.ExpectError(err) // physical -> virtual - testMapping(ctx, f.HostClient, "test", "test", f.VclusterClient, "default", "test", true) + testMapping(ctx, f.HostClient, "test", "test", f.VClusterClient, "default", "test", true) // virtual -> physical - testMapping(ctx, f.VclusterClient, "test", "test", f.HostClient, f.VclusterNamespace, "test", f.MultiNamespaceMode) + testMapping(ctx, f.VClusterClient, "test", "test", f.HostClient, f.VclusterNamespace, "test", f.MultiNamespaceMode) }) ginkgo.Context("Should sync endpoint updates for a headless service", func() { ginkgo.It("in host -> vcluster service mapping", func() { - checkEndpointsSync(f.Context, f.HostClient, "test", "nginx", f.VclusterClient, "default", "nginx") + checkEndpointsSync(f.Context, f.HostClient, "test", "nginx", f.VClusterClient, "default", "nginx") }) ginkgo.It("in vcluster -> host service mapping", func() { - checkEndpointsSync(f.Context, f.VclusterClient, "test", "nginx", f.HostClient, f.VclusterNamespace, "nginx") + checkEndpointsSync(f.Context, f.VClusterClient, "test", "nginx", f.HostClient, f.VclusterNamespace, "nginx") }) }) }) diff --git a/test/e2e/syncer/networkpolicies/networkpolicies.go b/test/e2e/syncer/networkpolicies/networkpolicies.go index fb07f38adb..4551c6bd52 100644 --- a/test/e2e/syncer/networkpolicies/networkpolicies.go +++ b/test/e2e/syncer/networkpolicies/networkpolicies.go @@ -34,12 +34,12 @@ var _ = ginkgo.Describe("NetworkPolicies are created as expected", func() { // create test namespaces with different labels var err error - nsA, err = f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + nsA, err = 
f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: nsNameA, Labels: map[string]string{"key-a": fmt.Sprintf("e2e-syncer-networkpolicies-aaa-%d", iteration)}, }}, metav1.CreateOptions{}) framework.ExpectNoError(err) - nsB, err = f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + nsB, err = f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: nsNameB, Labels: map[string]string{"key-b": fmt.Sprintf("e2e-syncer-networkpolicies-bbb-%d", iteration)}, }}, metav1.CreateOptions{}) @@ -74,7 +74,7 @@ var _ = ginkgo.Describe("NetworkPolicies are created as expected", func() { framework.ExpectNoError(err) f.Log.Info("deny all Egress from the Namespace that hosts curl pod") - networkPolicy, err := f.VclusterClient.NetworkingV1().NetworkPolicies(nsA.GetName()).Create(f.Context, &networkingv1.NetworkPolicy{ + networkPolicy, err := f.VClusterClient.NetworkingV1().NetworkPolicies(nsA.GetName()).Create(f.Context, &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Namespace: nsA.GetName(), Name: "my-egress-policy"}, Spec: networkingv1.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{}, @@ -175,14 +175,14 @@ var _ = ginkgo.Describe("NetworkPolicies are created as expected", func() { func updateNetworkPolicyWithRetryOnConflict(f *framework.Framework, networkPolicy *networkingv1.NetworkPolicy, mutator func(np *networkingv1.NetworkPolicy)) error { return retry.RetryOnConflict(retry.DefaultRetry, func() error { var err error - networkPolicy, err = f.VclusterClient.NetworkingV1().NetworkPolicies(networkPolicy.GetNamespace()).Get(f.Context, networkPolicy.GetName(), metav1.GetOptions{}) + networkPolicy, err = f.VClusterClient.NetworkingV1().NetworkPolicies(networkPolicy.GetNamespace()).Get(f.Context, networkPolicy.GetName(), metav1.GetOptions{}) if err != nil { return err } mutator(networkPolicy) - 
networkPolicy, err = f.VclusterClient.NetworkingV1().NetworkPolicies(networkPolicy.GetNamespace()).Update(f.Context, networkPolicy, metav1.UpdateOptions{}) + networkPolicy, err = f.VClusterClient.NetworkingV1().NetworkPolicies(networkPolicy.GetNamespace()).Update(f.Context, networkPolicy, metav1.UpdateOptions{}) return err }) } diff --git a/test/e2e/syncer/pods/pods.go b/test/e2e/syncer/pods/pods.go index 2d565304a5..fa7891c2d2 100644 --- a/test/e2e/syncer/pods/pods.go +++ b/test/e2e/syncer/pods/pods.go @@ -41,7 +41,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { ns = fmt.Sprintf("e2e-syncer-pods-%d-%s", iteration, random.String(5)) // create test namespace - _, err := f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + _, err := f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: ns, Labels: map[string]string{initialNsLabelKey: initialNsLabelValue}, }}, metav1.CreateOptions{}) @@ -56,7 +56,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { ginkgo.It("Test pod starts successfully and status is synced back to vcluster pod resource", func() { podName := "test" - _, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + _, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -75,7 +75,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // get current status - vpod, err := f.VclusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) + vpod, err := f.VClusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) framework.ExpectNoError(err) pod, err := 
f.HostClient.CoreV1().Pods(translate.Default.PhysicalNamespace(ns)).Get(f.Context, translate.Default.PhysicalName(podName, ns), metav1.GetOptions{}) framework.ExpectNoError(err) @@ -83,7 +83,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectEqual(vpod.Status, pod.Status) // check for ephemeralContainers subResource - version, err := f.VclusterClient.Discovery().ServerVersion() + version, err := f.VClusterClient.Discovery().ServerVersion() framework.ExpectNoError(err) // version 1.22 and lesser than that needs legacy flag enabled @@ -98,7 +98,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { }, }} // update ephemeralContainer - vpod, err = f.VclusterClient.CoreV1().Pods(ns).UpdateEphemeralContainers(f.Context, vpod.Name, vpod, metav1.UpdateOptions{}) + vpod, err = f.VClusterClient.CoreV1().Pods(ns).UpdateEphemeralContainers(f.Context, vpod.Name, vpod, metav1.UpdateOptions{}) framework.ExpectNoError(err) err = f.WaitForPodRunning(vpod.Name, vpod.Namespace) framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") @@ -111,7 +111,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { ginkgo.It("Test pod starts successfully and readiness conditions are synced back to vcluster pod resource", func() { podName := "test" - _, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + _, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ ReadinessGates: []corev1.PodReadinessGate{ @@ -133,7 +133,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // get current status - vpod, err := f.VclusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) + vpod, err := 
f.VClusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) framework.ExpectNoError(err) pod, err := f.HostClient.CoreV1().Pods(translate.Default.PhysicalNamespace(ns)).Get(f.Context, translate.Default.PhysicalName(podName, ns), metav1.GetOptions{}) framework.ExpectNoError(err) @@ -142,7 +142,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { // check for conditions vpod.Status.Conditions = append(vpod.Status.Conditions, corev1.PodCondition{Status: corev1.ConditionFalse, Type: "www.example.com/gate-1"}) // update conditions - vpod, err = f.VclusterClient.CoreV1().Pods(ns).UpdateStatus(f.Context, vpod, metav1.UpdateOptions{}) + vpod, err = f.VClusterClient.CoreV1().Pods(ns).UpdateStatus(f.Context, vpod, metav1.UpdateOptions{}) framework.ExpectNoError(err) err = f.WaitForPodRunning(vpod.Name, vpod.Namespace) framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") @@ -155,7 +155,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { saName := "test-account" // create a service account - _, err := f.VclusterClient.CoreV1().ServiceAccounts(ns).Create(f.Context, &corev1.ServiceAccount{ + _, err := f.VClusterClient.CoreV1().ServiceAccounts(ns).Create(f.Context, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: saName, }, @@ -166,7 +166,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { err = f.WaitForServiceAccount(saName, ns) framework.ExpectNoError(err) - _, err = f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + _, err = f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ ServiceAccountName: saName, @@ -186,7 +186,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // get 
current state - vpod, err := f.VclusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) + vpod, err := f.VClusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) framework.ExpectNoError(err) // verify that ServiceAccountName is unchanged @@ -203,7 +203,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { filePath := "/test-path" // create a configmap - _, err := f.VclusterClient.CoreV1().ConfigMaps(ns).Create(f.Context, &corev1.ConfigMap{ + _, err := f.VClusterClient.CoreV1().ConfigMaps(ns).Create(f.Context, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cmName, }, @@ -212,7 +212,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { }}, metav1.CreateOptions{}) framework.ExpectNoError(err) - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -286,7 +286,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { filePath := "/test-path" // create a configmap - _, err := f.VclusterClient.CoreV1().Secrets(ns).Create(f.Context, &corev1.Secret{ + _, err := f.VClusterClient.CoreV1().Secrets(ns).Create(f.Context, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, }, @@ -295,7 +295,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { }}, metav1.CreateOptions{}) framework.ExpectNoError(err) - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -365,7 +365,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { svcPort := 80 myProtocol := "https" - _, err := 
f.VclusterClient.CoreV1().Services(ns).Create(f.Context, &corev1.Service{ + _, err := f.VClusterClient.CoreV1().Services(ns).Create(f.Context, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: svcName}, Spec: corev1.ServiceSpec{ Selector: map[string]string{"doesnt": "matter"}, @@ -378,7 +378,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { err = f.WaitForServiceInSyncerCache(svcName, ns) framework.ExpectNoError(err) - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -441,7 +441,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { ginkgo.It("Test pod contains namespace labels", func() { podName := "test" - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -466,7 +466,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectHaveKey(pPod.GetLabels(), pKey) framework.ExpectEqual(pPod.GetLabels()[pKey], initialNsLabelValue) - namespace, err := f.VclusterClient.CoreV1().Namespaces().Get(f.Context, ns, metav1.GetOptions{}) + namespace, err := f.VClusterClient.CoreV1().Namespaces().Get(f.Context, ns, metav1.GetOptions{}) framework.ExpectNoError(err) additionalLabelKey := "another-one" additionalLabelValue := "good-syncer" @@ -477,7 +477,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { updated := false err = wait.PollUntilContextTimeout(f.Context, time.Second, framework.PollTimeout, true, func(ctx context.Context) (bool, error) { if !updated { - namespace, err = f.VclusterClient.CoreV1().Namespaces().Update(ctx, namespace, 
metav1.UpdateOptions{}) + namespace, err = f.VClusterClient.CoreV1().Namespaces().Update(ctx, namespace, metav1.UpdateOptions{}) if err != nil && !kerrors.IsConflict(err) { return false, err } @@ -498,7 +498,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { ginkgo.It("Test if service account tokens are synced and mounted through secrets", func() { podName := "test-nginx" - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, Namespace: ns, @@ -527,7 +527,8 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectEqual(ok, false, "service account token annotation should not be present") // make sure the secret is created in host cluster - _, err = f.HostClient.CoreV1().Secrets(translate.Default.PhysicalNamespace(ns)).Get(f.Context, podtranslate.SecretNameFromPodName(pod.Name, ns), metav1.GetOptions{}) + secretName := translate.Default.PhysicalName(fmt.Sprintf("%s-sa-token", pod.Name), ns) + _, err = f.HostClient.CoreV1().Secrets(translate.Default.PhysicalNamespace(ns)).Get(f.Context, secretName, metav1.GetOptions{}) framework.ExpectNoError(err) // make sure the project volume for path 'token' is now using a secret instead of service account @@ -535,7 +536,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { if volume.Projected != nil { for _, source := range volume.Projected.Sources { if source.Secret != nil { - framework.ExpectEqual(source.Secret.Name, podtranslate.SecretNameFromPodName(pod.Name, ns)) + framework.ExpectEqual(source.Secret.Name, secretName) } } } diff --git a/test/e2e/syncer/pvc/pvc.go b/test/e2e/syncer/pvc/pvc.go index eafec09a22..4cca0f6c17 100644 --- a/test/e2e/syncer/pvc/pvc.go +++ b/test/e2e/syncer/pvc/pvc.go @@ -31,7 +31,7 @@ var _ = ginkgo.Describe("Persistent volume synced from host cluster", func() { ns = 
fmt.Sprintf("e2e-syncer-pvc-%d-%s", iteration, random.String(5)) - _, err := f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + _, err := f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: ns, Labels: map[string]string{initialNsLabelKey: initialNsLabelValue}, }}, metav1.CreateOptions{}) @@ -51,7 +51,7 @@ var _ = ginkgo.Describe("Persistent volume synced from host cluster", func() { q, err := resource.ParseQuantity("3Gi") framework.ExpectNoError(err) - _, err = f.VclusterClient.CoreV1().PersistentVolumeClaims(ns).Create(f.Context, &corev1.PersistentVolumeClaim{ + _, err = f.VClusterClient.CoreV1().PersistentVolumeClaims(ns).Create(f.Context, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, }, @@ -71,7 +71,7 @@ var _ = ginkgo.Describe("Persistent volume synced from host cluster", func() { // add a pod bound to the volume as by default storage class on kind is configured with // volume binding mode as WaitForFirstConsumer - _, err = f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + _, err = f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -102,7 +102,7 @@ var _ = ginkgo.Describe("Persistent volume synced from host cluster", func() { // get current status - vpvc, err := f.VclusterClient.CoreV1().PersistentVolumeClaims(ns).Get(f.Context, pvcName, metav1.GetOptions{}) + vpvc, err := f.VClusterClient.CoreV1().PersistentVolumeClaims(ns).Get(f.Context, pvcName, metav1.GetOptions{}) framework.ExpectNoError(err) pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.PhysicalNamespace(ns)).Get(f.Context, translate.Default.PhysicalName(pvcName, ns), metav1.GetOptions{}) diff --git a/test/e2e/syncer/services/services.go b/test/e2e/syncer/services/services.go index 059d9c6c00..d5e93560ab 100644 --- 
a/test/e2e/syncer/services/services.go +++ b/test/e2e/syncer/services/services.go @@ -38,7 +38,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { ns = fmt.Sprintf("e2e-syncer-services-%d-%s", iteration, random.String(5)) // create test namespace - _, err := f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err := f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) framework.ExpectNoError(err) }) @@ -66,7 +66,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { }, } - vService, err := f.VclusterClient.CoreV1().Services(ns).Create(f.Context, service, metav1.CreateOptions{}) + vService, err := f.VClusterClient.CoreV1().Services(ns).Create(f.Context, service, metav1.CreateOptions{}) framework.ExpectNoError(err) err = f.WaitForService(vService.Name, vService.Namespace) framework.ExpectNoError(err) @@ -99,13 +99,13 @@ var _ = ginkgo.Describe("Services are created as expected", func() { body, err := json.Marshal(service) framework.ExpectNoError(err) - _, err = f.VclusterClient.RESTClient().Post().AbsPath("/api/v1/namespaces/" + ns + "/services").Body(body).DoRaw(f.Context) + _, err = f.VClusterClient.RESTClient().Post().AbsPath("/api/v1/namespaces/" + ns + "/services").Body(body).DoRaw(f.Context) framework.ExpectNoError(err) err = f.WaitForService(service.Name, service.Namespace) framework.ExpectNoError(err) - _, err = f.VclusterClient.CoreV1().Services(ns).Get(f.Context, service.Name, metav1.GetOptions{}) + _, err = f.VClusterClient.CoreV1().Services(ns).Get(f.Context, service.Name, metav1.GetOptions{}) framework.ExpectNoError(err) _, err = f.HostClient.CoreV1().Services(translate.Default.PhysicalNamespace(ns)).Get(f.Context, translate.Default.PhysicalName(service.Name, service.Namespace), metav1.GetOptions{}) @@ -114,19 +114,19 @@ var _ = 
ginkgo.Describe("Services are created as expected", func() { ginkgo.It("Services should complete a service status lifecycle", func() { svcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} - svcClient := f.VclusterClient.CoreV1().Services(ns) + svcClient := f.VClusterClient.CoreV1().Services(ns) testSvcName := "test-service-" + utilrand.String(5) testSvcLabels := map[string]string{"test-service-static": "true"} testSvcLabelsFlat := "test-service-static=true" ctx := f.Context - svcList, err := f.VclusterClient.CoreV1().Services("").List(f.Context, metav1.ListOptions{LabelSelector: testSvcLabelsFlat}) + svcList, err := f.VClusterClient.CoreV1().Services("").List(f.Context, metav1.ListOptions{LabelSelector: testSvcLabelsFlat}) framework.ExpectNoError(err, "failed to list Services") w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = testSvcLabelsFlat - return f.VclusterClient.CoreV1().Services(ns).Watch(f.Context, options) + return f.VClusterClient.CoreV1().Services(ns).Watch(f.Context, options) }, } @@ -147,7 +147,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { }, } - _, err = f.VclusterClient.CoreV1().Services(ns).Create(f.Context, testService, metav1.CreateOptions{}) + _, err = f.VClusterClient.CoreV1().Services(ns).Create(f.Context, testService, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("watching for the Service to be added") @@ -172,7 +172,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { f.Log.Infof("Service %s created", testSvcName) ginkgo.By("Getting /status") - DynamicClient, err := dynamic.NewForConfig(f.VclusterConfig) + DynamicClient, err := dynamic.NewForConfig(f.VClusterConfig) framework.ExpectNoError(err, "Failed to initialize the client", err) svcStatusUnstructured, err := DynamicClient.Resource(svcResource).Namespace(ns).Get(ctx, testSvcName, metav1.GetOptions{}, "status") 
framework.ExpectNoError(err, "Failed to fetch ServiceStatus of Service %s in namespace %s", testSvcName, ns) @@ -298,7 +298,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { f.Log.Infof("Service %s patched", testSvcName) // Delete service - err = f.VclusterClient.CoreV1().Services(ns).Delete(f.Context, testSvcName, metav1.DeleteOptions{}) + err = f.VClusterClient.CoreV1().Services(ns).Delete(f.Context, testSvcName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the Service. %v", err) ctx, cancel = context.WithTimeout(ctx, 1*time.Minute) diff --git a/test/e2e/webhook/webhook.go b/test/e2e/webhook/webhook.go index c174825780..40d8b8e55d 100644 --- a/test/e2e/webhook/webhook.go +++ b/test/e2e/webhook/webhook.go @@ -60,7 +60,7 @@ var _ = ginkgo.Describe("AdmissionWebhook", func() { ns = fmt.Sprintf("e2e-webhook-%d-%s", iteration, random.String(5)) // create test namespace - _, err := f.VclusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: map[string]string{uniqueName: "true"}}}, metav1.CreateOptions{}) + _, err := f.VClusterClient.CoreV1().Namespaces().Create(f.Context, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: map[string]string{uniqueName: "true"}}}, metav1.CreateOptions{}) framework.ExpectNoError(err) createWebhookConfigurationReadyNamespace(f, ns) @@ -108,7 +108,7 @@ var _ = ginkgo.Describe("AdmissionWebhook", func() { // prevent cross-talk with webhook configurations being tested. 
func createWebhookConfigurationReadyNamespace(f *framework.Framework, namespace string) { ctx := f.Context - _, err := f.VclusterClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + _, err := f.VClusterClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace + "-markers", Labels: map[string]string{uniqueName + "-markers": "true"}, @@ -118,7 +118,7 @@ func createWebhookConfigurationReadyNamespace(f *framework.Framework, namespace } func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32, namespace string) func() { - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context ginkgo.By("Registering the webhook via the AdmissionRegistration API") @@ -201,7 +201,7 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss } f.Log.Fatalf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, uniqueName) } - return f.VclusterClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(ctx, config, metav1.CreateOptions{}) + return f.VClusterClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(ctx, config, metav1.CreateOptions{}) } func newDenyPodWebhookFixture(certCtx *certContext, servicePort int32, namespace string) admissionregistrationv1.ValidatingWebhook { @@ -314,7 +314,7 @@ func newValidatingIsReadyWebhookFixture(certCtx *certContext, servicePort int32, // A webhook created with newValidatingIsReadyWebhookFixture or newMutatingIsReadyWebhookFixture should first be added to // the webhook configuration. 
func waitWebhookConfigurationReady(f *framework.Framework, namespace string) error { - cmClient := f.VclusterClient.CoreV1().ConfigMaps(namespace + "-markers") + cmClient := f.VClusterClient.CoreV1().ConfigMaps(namespace + "-markers") return wait.PollUntilContextTimeout(f.Context, 100*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) { marker := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -341,7 +341,7 @@ func waitWebhookConfigurationReady(f *framework.Framework, namespace string) err func testWebhook(f *framework.Framework, namespace string) { ginkgo.By("create a pod that should be denied by the webhook") - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context // Creating the pod, the request should be rejected pod := nonCompliantPod() @@ -357,7 +357,7 @@ func testWebhook(f *framework.Framework, namespace string) { } ginkgo.By("create a pod that causes the webhook to hang") - client = f.VclusterClient + client = f.VClusterClient // Creating the pod, the request should be rejected pod = hangingPod() _, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) @@ -440,7 +440,7 @@ func testWebhook(f *framework.Framework, namespace string) { func createNamespace(f *framework.Framework, ns *corev1.Namespace) error { return wait.PollUntilContextTimeout(f.Context, 100*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) { - _, err := f.VclusterClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + _, err := f.VClusterClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) if err != nil { if strings.HasPrefix(err.Error(), "object is being deleted:") { return false, nil @@ -532,7 +532,7 @@ func updateConfigMap(ctx context.Context, c *kubernetes.Clientset, ns, name stri func cleanWebhookTest(f *framework.Framework, namespace string) { ctx := f.Context - client := f.VclusterClient + client := f.VClusterClient _ = 
client.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{}) _ = client.AppsV1().Deployments(namespace).Delete(ctx, deploymentName, metav1.DeleteOptions{}) _ = client.CoreV1().Secrets(namespace).Delete(ctx, secretName, metav1.DeleteOptions{}) @@ -547,7 +547,7 @@ func cleanWebhookTest(f *framework.Framework, namespace string) { func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication") - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap _, err := client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{ @@ -580,7 +580,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { func deployWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32, namespace string) { ginkgo.By("Deploying the webhook pod") - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context // Creating the secret that contains the webhook's cert. 
@@ -716,7 +716,7 @@ func newDeployment(deploymentName string, replicas int32, podLabels map[string]s } func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32, ns string) func() { - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context ginkgo.By("Registering the webhook via the AdmissionRegistration API") @@ -786,7 +786,7 @@ func toBeAttachedPod() *corev1.Pod { func testAttachingPodWebhook(f *framework.Framework, ns string) { ginkgo.By("create a pod") - client := f.VclusterClient + client := f.VClusterClient ctx := f.Context pod := toBeAttachedPod() _, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) @@ -797,7 +797,7 @@ func testAttachingPodWebhook(f *framework.Framework, ns string) { ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") timer := time.NewTimer(30 * time.Second) defer timer.Stop() - _, err = framework.NewKubectlCommand(f.VclusterKubeconfigFile.Name(), ns, "attach", fmt.Sprintf("--namespace=%v", ns), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec() + _, err = framework.NewKubectlCommand(f.VClusterKubeConfigFile.Name(), ns, "attach", fmt.Sprintf("--namespace=%v", ns), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec() framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook") if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) { framework.Failf("unexpected 'kubectl attach' error message. 
expected to contain %q, got %q", e, a) diff --git a/test/e2e_generic/clusterscope/ingressclasses.go b/test/e2e_generic/clusterscope/ingressclasses.go index e8edc62d4e..6a122e28a9 100644 --- a/test/e2e_generic/clusterscope/ingressclasses.go +++ b/test/e2e_generic/clusterscope/ingressclasses.go @@ -38,7 +38,7 @@ var _ = ginkgo.Describe("Generic sync cluster scoped resources", func() { var ingClass *networkingv1.IngressClass err = wait.PollUntilContextTimeout(ctx, time.Millisecond*500, framework.PollTimeout, true, func(ctx context.Context) (bool, error) { - ingClass, err = f.VclusterClient.NetworkingV1().IngressClasses().Get(ctx, IngressClassName, metav1.GetOptions{}) + ingClass, err = f.VClusterClient.NetworkingV1().IngressClasses().Get(ctx, IngressClassName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -57,7 +57,7 @@ var _ = ginkgo.Describe("Generic sync cluster scoped resources", func() { ginkgo.It("deleting virtual cluster scoped object doesn't delete the physical", func() { ctx := f.Context - err := f.VclusterClient.NetworkingV1().IngressClasses().Delete(ctx, IngressClassName, metav1.DeleteOptions{}) + err := f.VClusterClient.NetworkingV1().IngressClasses().Delete(ctx, IngressClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) // should not delete the physical ingress class @@ -84,7 +84,7 @@ var _ = ginkgo.Describe("Generic sync cluster scoped resources", func() { framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(ctx, time.Millisecond*500, framework.PollTimeout, true, func(ctx context.Context) (bool, error) { - _, err = f.VclusterClient.NetworkingV1().IngressClasses().Get(ctx, IngressClassName, metav1.GetOptions{}) + _, err = f.VClusterClient.NetworkingV1().IngressClasses().Get(ctx, IngressClassName, metav1.GetOptions{}) if kerrors.IsNotFound(err) { return true, nil } diff --git a/test/e2e_isolation_mode/isolation/isolated.go b/test/e2e_isolation_mode/isolation/isolated.go index 
6c3c8d8020..c93e3b447d 100644 --- a/test/e2e_isolation_mode/isolation/isolated.go +++ b/test/e2e_isolation_mode/isolation/isolated.go @@ -34,7 +34,7 @@ var _ = ginkgo.Describe("Isolated mode", func() { framework.ExpectNoError(err) ginkgo.By("Check if isolated mode applies baseline PodSecurityStandards to namespaces in vcluster") - ns, err := f.VclusterClient.CoreV1().Namespaces().Get(f.Context, "default", metav1.GetOptions{}) + ns, err := f.VClusterClient.CoreV1().Namespaces().Get(f.Context, "default", metav1.GetOptions{}) framework.ExpectNoError(err) if ns.Labels["pod-security.kubernetes.io/enforce"] != "baseline" { framework.Failf("baseline PodSecurityStandards is not applied") @@ -46,10 +46,10 @@ var _ = ginkgo.Describe("Isolated mode", func() { Name: "my-new-namespace", }, } - _, err = f.VclusterClient.CoreV1().Namespaces().Create(f.Context, nsName, metav1.CreateOptions{}) + _, err = f.VClusterClient.CoreV1().Namespaces().Create(f.Context, nsName, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute, false, func(ctx context.Context) (done bool, err error) { - ns, _ := f.VclusterClient.CoreV1().Namespaces().Get(ctx, nsName.Name, metav1.GetOptions{}) + ns, _ := f.VClusterClient.CoreV1().Namespaces().Get(ctx, nsName.Name, metav1.GetOptions{}) if ns.Status.Phase == corev1.NamespaceActive { return true, nil } @@ -60,7 +60,7 @@ var _ = ginkgo.Describe("Isolated mode", func() { framework.Failf("baseline PodSecurityStandards is not applied for new namespace") } - err = f.VclusterClient.CoreV1().Namespaces().Delete(f.Context, nsName.Name, metav1.DeleteOptions{}) + err = f.VClusterClient.CoreV1().Namespaces().Delete(f.Context, nsName.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -88,16 +88,16 @@ var _ = ginkgo.Describe("Isolated mode", func() { }, } - _, err := f.VclusterClient.CoreV1().Pods("default").Create(f.Context, pod, metav1.CreateOptions{}) + _, err := 
f.VClusterClient.CoreV1().Pods("default").Create(f.Context, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(f.Context, time.Second*2, time.Minute*1, false, func(ctx context.Context) (bool, error) { - p, _ := f.VclusterClient.CoreV1().Pods("default").Get(ctx, "nginx", metav1.GetOptions{}) + p, _ := f.VClusterClient.CoreV1().Pods("default").Get(ctx, "nginx", metav1.GetOptions{}) if p.Status.Phase == corev1.PodRunning { return true, nil } - e, _ := f.VclusterClient.CoreV1().Events("default").List(ctx, metav1.ListOptions{TypeMeta: p.TypeMeta}) + e, _ := f.VClusterClient.CoreV1().Events("default").List(ctx, metav1.ListOptions{TypeMeta: p.TypeMeta}) if len(e.Items) > 0 { if strings.Contains(e.Items[0].Message, `Invalid value: "2": must be less than or equal to cpu limit`) { return true, fmt.Errorf(`invalid value: "2": must be less than or equal to cpu limit`) @@ -107,7 +107,7 @@ var _ = ginkgo.Describe("Isolated mode", func() { }) framework.ExpectError(err) - err = f.VclusterClient.CoreV1().Pods("default").Delete(f.Context, pod.Name, metav1.DeleteOptions{}) + err = f.VClusterClient.CoreV1().Pods("default").Delete(f.Context, pod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) }) diff --git a/test/e2e_metrics_proxy/metricsproxy/metrics_proxy.go b/test/e2e_metrics_proxy/metricsproxy/metrics_proxy.go index 88478af9da..a470dcbf4a 100644 --- a/test/e2e_metrics_proxy/metricsproxy/metrics_proxy.go +++ b/test/e2e_metrics_proxy/metricsproxy/metrics_proxy.go @@ -21,7 +21,7 @@ var _ = ginkgo.Describe("Target Namespace", func() { ginkgo.It("Make sure the metrics api service is registered and available", func() { err := wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { - apiRegistrationClient := apiregistrationv1clientset.NewForConfigOrDie(f.VclusterConfig) + apiRegistrationClient := apiregistrationv1clientset.NewForConfigOrDie(f.VClusterConfig) apiService, err 
:= apiRegistrationClient.APIServices().Get(ctx, "v1beta1.metrics.k8s.io", metav1.GetOptions{}) if err != nil { return false, nil @@ -38,7 +38,7 @@ var _ = ginkgo.Describe("Target Namespace", func() { ginkgo.It("Make sure get nodeMetrics and podMetrics succeed", func() { err := wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { - metricsClient := metricsv1beta1client.NewForConfigOrDie(f.VclusterConfig) + metricsClient := metricsv1beta1client.NewForConfigOrDie(f.VClusterConfig) nodeMetricsList, err := metricsClient.NodeMetricses().List(ctx, metav1.ListOptions{}) if err != nil { diff --git a/test/e2e_node/node/node.go b/test/e2e_node/node/node.go index 8890e0cdd7..3a04dcdad1 100644 --- a/test/e2e_node/node/node.go +++ b/test/e2e_node/node/node.go @@ -15,7 +15,7 @@ var _ = ginkgo.Describe("Node sync", func() { hostNodes, err := f.HostClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) - virtualNodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(hostNodes.Items), len(virtualNodes.Items)) @@ -33,7 +33,7 @@ var _ = ginkgo.Describe("Node sync", func() { }) ginkgo.It("fake nodes have fake kubelet service IPs", func() { - virtualNodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) for _, nodes := range virtualNodes.Items { diff --git a/test/e2e_plugin/plugin/plugin.go b/test/e2e_plugin/plugin/plugin.go index dd71fc15cd..e5fe578998 100644 --- a/test/e2e_plugin/plugin/plugin.go +++ b/test/e2e_plugin/plugin/plugin.go @@ -24,7 +24,7 @@ var _ = ginkgo.Describe("plugin", func() { ginkgo.It("test legacy vCluster plugin", func() { // check if 
deployment is there gomega.Eventually(func() bool { - _, err := f.VclusterClient.AppsV1().Deployments("default").Get(f.Context, "mydeployment", metav1.GetOptions{}) + _, err := f.VClusterClient.AppsV1().Deployments("default").Get(f.Context, "mydeployment", metav1.GetOptions{}) return err == nil }). WithPolling(pollingInterval). @@ -51,7 +51,7 @@ var _ = ginkgo.Describe("plugin", func() { } // create service - err := f.VclusterCRClient.Create(f.Context, service) + err := f.VClusterCRClient.Create(f.Context, service) framework.ExpectNoError(err) // wait for service to become synced @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("plugin", func() { // wait for secret to become synced vSecret := &corev1.Secret{} gomega.Eventually(func() bool { - err := f.VclusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) + err := f.VClusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) return err == nil }). WithPolling(pollingInterval). @@ -110,7 +110,7 @@ var _ = ginkgo.Describe("plugin", func() { // wait for update gomega.Eventually(func() bool { - err := f.VclusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) + err := f.VClusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) return err == nil && string(vSecret.Data["test"]) == "newtest" }). WithPolling(pollingInterval). @@ -123,7 +123,7 @@ var _ = ginkgo.Describe("plugin", func() { // wait for delete within vCluster gomega.Eventually(func() bool { - err := f.VclusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) + err := f.VClusterCRClient.Get(f.Context, types.NamespacedName{Name: "test", Namespace: "test"}, vSecret) return kerrors.IsNotFound(err) }). WithPolling(pollingInterval). 
diff --git a/test/e2e_scheduler/scheduler/scheduler.go b/test/e2e_scheduler/scheduler/scheduler.go index c89e42bda1..0be269d5ab 100644 --- a/test/e2e_scheduler/scheduler/scheduler.go +++ b/test/e2e_scheduler/scheduler/scheduler.go @@ -16,7 +16,7 @@ var _ = ginkgo.Describe("Scheduler sync", func() { f := framework.DefaultFramework ginkgo.It("Use taints and toleration to assign virtual node to pod", func() { ginkgo.By("Add taints to virtual nodes only") - virtualNodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) for _, vnode := range virtualNodes.Items { @@ -25,7 +25,7 @@ var _ = ginkgo.Describe("Scheduler sync", func() { Value: "value1", Effect: corev1.TaintEffectNoSchedule, }) - _, err = f.VclusterClient.CoreV1().Nodes().Update(f.Context, &vnode, metav1.UpdateOptions{}) + _, err = f.VClusterClient.CoreV1().Nodes().Update(f.Context, &vnode, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -37,7 +37,7 @@ var _ = ginkgo.Describe("Scheduler sync", func() { hostNodesTaints[hnode.Name] = hnode.Spec.Taints } - virtualNodes, err = f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err = f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) virtualNodesTaints := make(map[string][]corev1.Taint) @@ -75,11 +75,11 @@ var _ = ginkgo.Describe("Scheduler sync", func() { }, } - _, err = f.VclusterClient.CoreV1().Pods(nsName).Create(f.Context, pod, metav1.CreateOptions{}) + _, err = f.VClusterClient.CoreV1().Pods(nsName).Create(f.Context, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { - p, _ := f.VclusterClient.CoreV1().Pods(nsName).Get(ctx, podName, metav1.GetOptions{}) + p, _ := 
f.VClusterClient.CoreV1().Pods(nsName).Get(ctx, podName, metav1.GetOptions{}) if p.Status.Phase == corev1.PodRunning { return true, nil } @@ -107,11 +107,11 @@ var _ = ginkgo.Describe("Scheduler sync", func() { }, } - _, err = f.VclusterClient.CoreV1().Pods(nsName).Create(f.Context, pod1, metav1.CreateOptions{}) + _, err = f.VClusterClient.CoreV1().Pods(nsName).Create(f.Context, pod1, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { - p, _ := f.VclusterClient.CoreV1().Pods(nsName).Get(ctx, pod1Name, metav1.GetOptions{}) + p, _ := f.VClusterClient.CoreV1().Pods(nsName).Get(ctx, pod1Name, metav1.GetOptions{}) if p.Status.Phase == corev1.PodRunning { return true, nil } @@ -120,16 +120,16 @@ var _ = ginkgo.Describe("Scheduler sync", func() { framework.ExpectError(err) ginkgo.By("remove taints from virtual node and delete namespace from vcluster") - vNodes, err := f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + vNodes, err := f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) for _, vnode := range vNodes.Items { vnode.Spec.Taints = vnode.Spec.Taints[:len(vnode.Spec.Taints)-1] - _, err = f.VclusterClient.CoreV1().Nodes().Update(f.Context, &vnode, metav1.UpdateOptions{}) + _, err = f.VClusterClient.CoreV1().Nodes().Update(f.Context, &vnode, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - virtualNodes, err = f.VclusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) + virtualNodes, err = f.VClusterClient.CoreV1().Nodes().List(f.Context, metav1.ListOptions{}) framework.ExpectNoError(err) virtualNodesTaints = make(map[string][]corev1.Taint) @@ -139,10 +139,10 @@ var _ = ginkgo.Describe("Scheduler sync", func() { framework.ExpectEqual(true, reflect.DeepEqual(hostNodesTaints, virtualNodesTaints)) ginkgo.By("delete pods from vcluster") - err = 
f.VclusterClient.CoreV1().Pods(nsName).Delete(f.Context, podName, metav1.DeleteOptions{}) + err = f.VClusterClient.CoreV1().Pods(nsName).Delete(f.Context, podName, metav1.DeleteOptions{}) framework.ExpectNoError(err) - err = f.VclusterClient.CoreV1().Pods(nsName).Delete(f.Context, pod1Name, metav1.DeleteOptions{}) + err = f.VClusterClient.CoreV1().Pods(nsName).Delete(f.Context, pod1Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) }) diff --git a/test/e2e_scheduler/scheduler/waitforfirstconsumer.go b/test/e2e_scheduler/scheduler/waitforfirstconsumer.go index 8df1e7fb9f..a50fca41a1 100644 --- a/test/e2e_scheduler/scheduler/waitforfirstconsumer.go +++ b/test/e2e_scheduler/scheduler/waitforfirstconsumer.go @@ -103,12 +103,12 @@ var _ = ginkgo.Describe("Schedule a Statefulset with WaitForFirstConsumer PVCs", // ceate workload.Spec.VolumeClaimTemplates[0].Spec.StorageClassName = &scToUse.Name - err = f.VclusterCRClient.Create(f.Context, workload) + err = f.VClusterCRClient.Create(f.Context, workload) framework.ExpectNoError(err) // wait for it to start running err = wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { ss := &appsv1.StatefulSet{} - err := f.VclusterCRClient.Get(ctx, types.NamespacedName{Name: workload.Name, Namespace: workload.Namespace}, ss) + err := f.VClusterCRClient.Get(ctx, types.NamespacedName{Name: workload.Name, Namespace: workload.Namespace}, ss) if err != nil { fmt.Fprintf(ginkgo.GinkgoWriter, "failed to fetch statefulset %q with err err: %v\n", workload.Name, err) return false, nil diff --git a/test/e2e_target_namespace/targetnamespace/target_namespace.go b/test/e2e_target_namespace/targetnamespace/target_namespace.go index 09b14283ed..f827940d3d 100644 --- a/test/e2e_target_namespace/targetnamespace/target_namespace.go +++ b/test/e2e_target_namespace/targetnamespace/target_namespace.go @@ -37,11 +37,11 @@ var _ = ginkgo.Describe("Target Namespace", func() { }, } - _, 
err := f.VclusterClient.CoreV1().Pods("default").Create(f.Context, pod, metav1.CreateOptions{}) + _, err := f.VClusterClient.CoreV1().Pods("default").Create(f.Context, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollUntilContextTimeout(f.Context, time.Second, time.Minute*2, false, func(ctx context.Context) (bool, error) { - p, _ := f.VclusterClient.CoreV1().Pods("default").Get(ctx, "nginx", metav1.GetOptions{}) + p, _ := f.VClusterClient.CoreV1().Pods("default").Get(ctx, "nginx", metav1.GetOptions{}) if p.Status.Phase == corev1.PodRunning { return true, nil } diff --git a/test/framework/framework.go b/test/framework/framework.go index 805a5b31aa..526edfecb0 100644 --- a/test/framework/framework.go +++ b/test/framework/framework.go @@ -58,22 +58,22 @@ type Framework struct { // host kubernetes cluster were we are testing in HostCRClient client.Client - // VclusterConfig is the kubernetes rest config of the current + // VClusterConfig is the kubernetes rest config of the current // vcluster instance which we are testing - VclusterConfig *rest.Config + VClusterConfig *rest.Config - // VclusterClient is the kubernetes client of the current + // VClusterClient is the kubernetes client of the current // vcluster instance which we are testing - VclusterClient *kubernetes.Clientset + VClusterClient *kubernetes.Clientset - // VclusterCRClient is the controller runtime client of the current + // VClusterCRClient is the controller runtime client of the current // vcluster instance which we are testing - VclusterCRClient client.Client + VClusterCRClient client.Client - // VclusterKubeconfigFile is a file containing kube config + // VClusterKubeConfigFile is a file containing kube config // of the current vcluster instance which we are testing. // This file shall be deleted in the end of the test suite execution. 
- VclusterKubeconfigFile *os.File + VClusterKubeConfigFile *os.File // Scheme is the global scheme to use Scheme *runtime.Scheme @@ -215,10 +215,10 @@ func CreateFramework(ctx context.Context, scheme *runtime.Scheme) error { HostConfig: hostConfig, HostClient: hostClient, HostCRClient: hostCRClient, - VclusterConfig: vclusterConfig, - VclusterClient: vclusterClient, - VclusterCRClient: vclusterCRClient, - VclusterKubeconfigFile: vKubeconfigFile, + VClusterConfig: vclusterConfig, + VClusterClient: vclusterClient, + VClusterCRClient: vclusterCRClient, + VClusterKubeConfigFile: vKubeconfigFile, Scheme: scheme, Log: l, ClientTimeout: timeout, @@ -230,5 +230,5 @@ func CreateFramework(ctx context.Context, scheme *runtime.Scheme) error { } func (f *Framework) Cleanup() error { - return os.Remove(f.VclusterKubeconfigFile.Name()) + return os.Remove(f.VClusterKubeConfigFile.Name()) } diff --git a/test/framework/util.go b/test/framework/util.go index 6b02729325..6b0c1b4025 100644 --- a/test/framework/util.go +++ b/test/framework/util.go @@ -29,7 +29,7 @@ func (f *Framework) WaitForPodRunning(podName string, ns string) error { if pod.Status.Phase != corev1.PodRunning { return false, nil } - vpod, err := f.VclusterClient.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) + vpod, err := f.VClusterClient.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -97,7 +97,7 @@ func (f *Framework) WaitForPersistentVolumeClaimBound(pvcName, ns string) error return false, nil } - vpvc, err := f.VclusterClient.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) + vpvc, err := f.VClusterClient.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -116,7 +116,7 @@ func (f *Framework) WaitForPersistentVolumeClaimBound(pvcName, ns string) error func (f *Framework) 
WaitForInitManifestConfigMapCreation(configMapName, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Millisecond*500, PollTimeout, true, func(ctx context.Context) (bool, error) { - _, err := f.VclusterClient.CoreV1().ConfigMaps(ns).Get(ctx, configMapName, metav1.GetOptions{}) + _, err := f.VClusterClient.CoreV1().ConfigMaps(ns).Get(ctx, configMapName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -130,7 +130,7 @@ func (f *Framework) WaitForInitManifestConfigMapCreation(configMapName, ns strin func (f *Framework) WaitForServiceAccount(saName string, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - _, err := f.VclusterClient.CoreV1().ServiceAccounts(ns).Get(ctx, saName, metav1.GetOptions{}) + _, err := f.VClusterClient.CoreV1().ServiceAccounts(ns).Get(ctx, saName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -162,7 +162,7 @@ func (f *Framework) WaitForServiceInSyncerCache(serviceName string, ns string) e annotationKey := "e2e-test-bump" updated := false return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - vService, err := f.VclusterClient.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) + vService, err := f.VClusterClient.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -175,7 +175,7 @@ func (f *Framework) WaitForServiceInSyncerCache(serviceName string, ns string) e vService.Annotations = map[string]string{} } vService.Annotations[annotationKey] = "arbitrary" - _, err = f.VclusterClient.CoreV1().Services(ns).Update(ctx, vService, metav1.UpdateOptions{}) + _, err = f.VClusterClient.CoreV1().Services(ns).Update(ctx, vService, metav1.UpdateOptions{}) if err != nil { if kerrors.IsConflict(err) || 
kerrors.IsNotFound(err) { return false, nil @@ -199,7 +199,7 @@ func (f *Framework) WaitForServiceInSyncerCache(serviceName string, ns string) e } func (f *Framework) DeleteTestNamespace(ns string, waitUntilDeleted bool) error { - err := f.VclusterClient.CoreV1().Namespaces().Delete(f.Context, ns, metav1.DeleteOptions{}) + err := f.VClusterClient.CoreV1().Namespaces().Delete(f.Context, ns, metav1.DeleteOptions{}) if err != nil { if kerrors.IsNotFound(err) { return nil @@ -210,7 +210,7 @@ func (f *Framework) DeleteTestNamespace(ns string, waitUntilDeleted bool) error return nil } return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - _, err = f.VclusterClient.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) + _, err = f.VClusterClient.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) if kerrors.IsNotFound(err) { return true, nil } @@ -225,7 +225,7 @@ func (f *Framework) GetDefaultSecurityContext() *corev1.SecurityContext { } func (f *Framework) CreateCurlPod(ns string) (*corev1.Pod, error) { - return f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + return f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "curl"}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -247,7 +247,7 @@ func (f *Framework) CreateNginxPodAndService(ns string) (*corev1.Pod, *corev1.Se serviceName := "nginx" labels := map[string]string{"app": "nginx"} - pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + pod, err := f.VClusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, Labels: labels, @@ -267,7 +267,7 @@ func (f *Framework) CreateNginxPodAndService(ns string) (*corev1.Pod, *corev1.Se return nil, nil, err } - service, err := f.VclusterClient.CoreV1().Services(ns).Create(f.Context, &corev1.Service{ + service, err := 
f.VClusterClient.CoreV1().Services(ns).Create(f.Context, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: ns, @@ -312,12 +312,12 @@ func (f *Framework) TestServiceIsEventuallyUnreachable(curlPod *corev1.Pod, serv func (f *Framework) curlService(_ context.Context, curlPod *corev1.Pod, service *corev1.Service) ([]byte, []byte, error) { url := fmt.Sprintf("http://%s.%s.svc:%d/", service.GetName(), service.GetNamespace(), service.Spec.Ports[0].Port) cmd := []string{"curl", "-s", "--show-error", "-o", "/dev/null", "-w", "%{http_code}", "--max-time", "2", url} - return podhelper.ExecBuffered(f.Context, f.VclusterConfig, curlPod.GetNamespace(), curlPod.GetName(), curlPod.Spec.Containers[0].Name, cmd, nil) + return podhelper.ExecBuffered(f.Context, f.VClusterConfig, curlPod.GetNamespace(), curlPod.GetName(), curlPod.Spec.Containers[0].Name, cmd, nil) } func (f *Framework) CreateEgressNetworkPolicyForDNS(ctx context.Context, ns string) (*networkingv1.NetworkPolicy, error) { UDPProtocol := corev1.ProtocolUDP - return f.VclusterClient.NetworkingV1().NetworkPolicies(ns).Create(ctx, &networkingv1.NetworkPolicy{ + return f.VClusterClient.NetworkingV1().NetworkPolicies(ns).Create(ctx, &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "allow-coredns-egress"}, Spec: networkingv1.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{},