From 4ec22f9d4a5f64f50c35b4c33306db9edad92a40 Mon Sep 17 00:00:00 2001
From: Vicente Cheng
Date: Fri, 4 Oct 2024 11:44:17 +0800
Subject: [PATCH] add mutator

---
 cmd/node-disk-manager-webhook/main.go | 160 ++++++++++++++++++++++++++
 pkg/webhook/blockdevice/mutator.go    |  88 ++++++++++++++
 pkg/webhook/blockdevice/validator.go  |  58 ++++++++++
 3 files changed, 306 insertions(+)
 create mode 100644 cmd/node-disk-manager-webhook/main.go
 create mode 100644 pkg/webhook/blockdevice/mutator.go
 create mode 100644 pkg/webhook/blockdevice/validator.go

diff --git a/cmd/node-disk-manager-webhook/main.go b/cmd/node-disk-manager-webhook/main.go
new file mode 100644
index 00000000..4b4b5cdc
--- /dev/null
+++ b/cmd/node-disk-manager-webhook/main.go
@@ -0,0 +1,160 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/harvester/webhook/pkg/config"
+	"github.com/harvester/webhook/pkg/server"
+	"github.com/harvester/webhook/pkg/server/admission"
+	"github.com/rancher/wrangler/v3/pkg/kubeconfig"
+	"github.com/rancher/wrangler/v3/pkg/signals"
+	"github.com/rancher/wrangler/v3/pkg/start"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli/v2"
+	"k8s.io/client-go/rest"
+
+	ctldisk "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io"
+	ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
+	"github.com/harvester/node-disk-manager/pkg/webhook/blockdevice"
+)
+
+const webhookName = "harvester-node-disk-manager-webhook"
+
+type resourceCaches struct {
+	bdCache ctldiskv1.BlockDeviceCache
+}
+
+func main() {
+	var options config.Options
+	var logLevel string
+
+	flags := []cli.Flag{
+		&cli.StringFlag{
+			Name:        "loglevel",
+			Usage:       "Specify log level",
+			EnvVars:     []string{"LOGLEVEL"},
+			Value:       "info",
+			Destination: &logLevel,
+		},
+		&cli.IntFlag{
+			Name:        "threadiness",
+			EnvVars:     []string{"THREADINESS"},
+			Usage:       "Specify controller threads",
+			Value:       5,
+			Destination: &options.Threadiness,
+		},
+		&cli.IntFlag{
+			Name:        "https-port",
+			EnvVars:     []string{"WEBHOOK_SERVER_HTTPS_PORT"},
+			Usage:       "HTTPS listen port",
+			Value:       8443,
+			Destination: &options.HTTPSListenPort,
+		},
+		&cli.StringFlag{
+			Name:        "namespace",
+			EnvVars:     []string{"NAMESPACE"},
+			Destination: &options.Namespace,
+			Usage:       "The harvester namespace",
+			Value:       "harvester-system",
+			Required:    true,
+		},
+		&cli.StringFlag{
+			Name:        "controller-user",
+			EnvVars:     []string{"CONTROLLER_USER_NAME"},
+			Destination: &options.ControllerUsername,
+			Value:       "harvester-node-disk-manager-webhook",
+			Usage:       "The harvester controller username",
+		},
+		&cli.StringFlag{
+			Name:        "gc-user",
+			EnvVars:     []string{"GARBAGE_COLLECTION_USER_NAME"},
+			Destination: &options.GarbageCollectionUsername,
+			Usage:       "The system username that performs garbage collection",
+			Value:       "system:serviceaccount:kube-system:generic-garbage-collector",
+		},
+	}
+
+	cfg, err := kubeconfig.GetNonInteractiveClientConfig(os.Getenv("KUBECONFIG")).ClientConfig()
+	if err != nil {
+		logrus.Fatal(err)
+	}
+
+	ctx := signals.SetupSignalContext()
+
+	app := cli.NewApp()
+	app.Flags = flags
+	app.Action = func(_ *cli.Context) error {
+		setLogLevel(logLevel)
+		err := runWebhookServer(ctx, cfg, &options)
+		return err
+	}
+
+	if err := app.Run(os.Args); err != nil {
+		logrus.Fatalf("run webhook server failed: %v", err)
+	}
+}
+
+func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Options) error {
+	resourceCaches, err := newCaches(ctx, cfg, options.Threadiness)
+	if err != nil {
+		return fmt.Errorf("error building resource caches: %s", err.Error())
fmt.Errorf("error building resource caches: %s", err.Error()) + } + + webhookServer := server.NewWebhookServer(ctx, cfg, webhookName, options) + + bdMutator := blockdevice.NewBlockdeviceMutator(resourceCaches.bdCache) + var mutators = []admission.Mutator{ + bdMutator, + } + + bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache) + var validators = []admission.Validator{ + bdValidator, + } + + if err := webhookServer.RegisterMutators(mutators...); err != nil { + return fmt.Errorf("failed to register mutators: %v", err) + } + + if err := webhookServer.RegisterValidators(validators...); err != nil { + return fmt.Errorf("failed to register validators: %v", err) + } + + if err := webhookServer.Start(); err != nil { + return fmt.Errorf("failed to start webhook server: %v", err) + } + + <-ctx.Done() + return nil + +} + +func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourceCaches, error) { + var starters []start.Starter + + disks, err := ctldisk.NewFactoryFromConfig(cfg) + if err != nil { + return nil, err + } + starters = append(starters, disks) + resourceCaches := &resourceCaches{ + bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(), + } + + if err := start.All(ctx, threadiness, starters...); err != nil { + return nil, err + } + + return resourceCaches, nil +} + +func setLogLevel(level string) { + ll, err := logrus.ParseLevel(level) + if err != nil { + ll = logrus.DebugLevel + } + // set global log level + logrus.SetLevel(ll) +} diff --git a/pkg/webhook/blockdevice/mutator.go b/pkg/webhook/blockdevice/mutator.go new file mode 100644 index 00000000..ea5991a3 --- /dev/null +++ b/pkg/webhook/blockdevice/mutator.go @@ -0,0 +1,88 @@ +package blockdevice + +import ( + "github.com/harvester/webhook/pkg/server/admission" + admissionregv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/runtime" + + diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1" + ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1" +) + +type Mutator struct { + admission.DefaultMutator + + BlockdeviceCache ctldiskv1.BlockDeviceCache +} + +func NewBlockdeviceMutator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Mutator { + return &Mutator{ + BlockdeviceCache: blockdeviceCache, + } +} + +func (m *Mutator) Update(_ *admission.Request, oldObj, newObj runtime.Object) (admission.Patch, error) { + var patchOps admission.Patch + oldBd := oldObj.(*diskv1.BlockDevice) + newBd := newObj.(*diskv1.BlockDevice) + + if newBd.Spec.FileSystem != nil && !newBd.Spec.FileSystem.Provisioned && !newBd.Spec.Provision { + return nil, nil + } + + prevProvision := oldBd.Status.ProvisionPhase == diskv1.ProvisionPhaseProvisioned + // That case means the spec.filesystem.provisioned is deprecated, so keep the provision value + if !prevProvision && newBd.Spec.Provision { + return nil, nil + } + + if !prevProvision && newBd.Spec.FileSystem.Provisioned { + provisionVal := newBd.Spec.FileSystem.Provisioned + patchAddNewProvision := admission.PatchOp{ + Op: admission.PatchOpReplace, + Path: "/spec/provision", + Value: provisionVal, + } + patchOps = append(patchOps, patchAddNewProvision) + if provisionVal && newBd.Spec.Provisioner == nil { + provisionerVal := &diskv1.LonghornProvisionerInfo{ + EngineVersion: "LonghornV1", + } + patchAddNewProvisioner := admission.PatchOp{ + Op: admission.PatchOpAdd, + Path: "/spec/provisioner", + Value: provisionerVal, + } + patchOps = append(patchOps, patchAddNewProvisioner) + } + 
+		return patchOps, nil
+	}
+	// Disabling: the device was provisioned and either field now turns provisioning off, so force spec.provision to false
+	if prevProvision && (!newBd.Spec.Provision || !newBd.Spec.FileSystem.Provisioned) {
+		if newBd.Spec.Provision {
+			patchProvision := admission.PatchOp{
+				Op:    admission.PatchOpReplace,
+				Path:  "/spec/provision",
+				Value: false,
+			}
+			patchOps = append(patchOps, patchProvision)
+		}
+		return patchOps, nil
+	}
+
+	return patchOps, nil
+}
+
+func (m *Mutator) Resource() admission.Resource {
+	return admission.Resource{
+		Names:      []string{"blockdevices"},
+		Scope:      admissionregv1.AllScopes,
+		APIGroup:   diskv1.SchemeGroupVersion.Group,
+		APIVersion: diskv1.SchemeGroupVersion.Version,
+		ObjectType: &diskv1.BlockDevice{},
+		OperationTypes: []admissionregv1.OperationType{
+			// We do not care about Create because the blockdevice is created by the controller
+			admissionregv1.Update,
+		},
+	}
+}
diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go
new file mode 100644
index 00000000..5eb42217
--- /dev/null
+++ b/pkg/webhook/blockdevice/validator.go
@@ -0,0 +1,58 @@
+package blockdevice
+
+import (
+	werror "github.com/harvester/webhook/pkg/error"
+	"github.com/harvester/webhook/pkg/server/admission"
+	admissionregv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
+	ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
+)
+
+type Validator struct {
+	admission.DefaultValidator
+
+	BlockdeviceCache ctldiskv1.BlockDeviceCache
+}
+
+func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Validator {
+	return &Validator{
+		BlockdeviceCache: blockdeviceCache,
+	}
+}
+
+func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error {
+	bd := newObj.(*diskv1.BlockDevice)
+	return v.validateProvisioner(bd)
+}
+
+func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error {
+	newBd := newObj.(*diskv1.BlockDevice)
+	return v.validateProvisioner(newBd)
+}
+
+func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
+	if bd.Spec.Provisioner == nil {
+		return nil
+	}
+
+	if bd.Spec.Provisioner.LVM != nil && bd.Spec.Provisioner.Longhorn != nil {
+		return werror.NewBadRequest("Blockdevice should not have multiple provisioners")
+	}
+	return nil
+}
+
+func (v *Validator) Resource() admission.Resource {
+	return admission.Resource{
+		Names:      []string{"blockdevices"},
+		Scope:      admissionregv1.AllScopes,
+		APIGroup:   diskv1.SchemeGroupVersion.Group,
+		APIVersion: diskv1.SchemeGroupVersion.Version,
+		ObjectType: &diskv1.BlockDevice{},
+		OperationTypes: []admissionregv1.OperationType{
+			admissionregv1.Create,
+			admissionregv1.Update,
+		},
+	}
+}
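
For review purposes, here is a minimal unit-test sketch (not part of the patch) that exercises the mutator's Update path above. It assumes the diskv1 package exposes BlockDeviceSpec and FilesystemInfo types matching the fields the mutator dereferences, that admission.Patch is a slice of PatchOp, and that a zero-valued Status means the device is not yet provisioned; adjust the names to the actual generated types if they differ.

package blockdevice

import (
	"testing"

	diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
)

// TestMutatorMirrorsDeprecatedProvisionedField checks that setting the
// deprecated spec.fileSystem.provisioned field on an unprovisioned device
// yields patches for spec.provision and a default Longhorn provisioner.
func TestMutatorMirrorsDeprecatedProvisionedField(t *testing.T) {
	// The cache is not used by Update in this patch, so nil is acceptable here.
	mutator := NewBlockdeviceMutator(nil)

	oldBd := &diskv1.BlockDevice{} // zero-valued status: not yet provisioned (assumption)
	newBd := &diskv1.BlockDevice{
		Spec: diskv1.BlockDeviceSpec{ // type names assumed from the fields used above
			// Caller still uses the deprecated field instead of spec.provision.
			FileSystem: &diskv1.FilesystemInfo{Provisioned: true},
		},
	}

	patches, err := mutator.Update(nil, oldBd, newBd)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Expect spec.provision to be mirrored and a default provisioner added.
	if len(patches) != 2 {
		t.Fatalf("expected 2 patch ops, got %d", len(patches))
	}
	if patches[0].Path != "/spec/provision" || patches[1].Path != "/spec/provisioner" {
		t.Fatalf("unexpected patch paths: %+v", patches)
	}
}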