From 803a1335df658e0a574b3f45e4b9a7d684ed77e7 Mon Sep 17 00:00:00 2001 From: renxiangyu_yewu Date: Fri, 8 Nov 2024 19:39:56 +0800 Subject: [PATCH] fix: fix kosmos-scheduler reschedule pv's pod Signed-off-by: renxiangyu_yewu --- .../controllers/pv/leaf_pv_controller.go | 4 +- .../utils/leaf_model_handler.go | 10 +- .../leafnode_volume_binding.go | 349 ++++++- .../leafnode_volume_binding_test.go | 849 ++++++++++++++++++ pkg/utils/constants.go | 1 + pkg/utils/pvpvc.go | 4 +- pkg/utils/utils.go | 8 + .../pkg/scheduler/framework/fake/listers.go | 320 +++++++ vendor/modules.txt | 1 + 9 files changed, 1525 insertions(+), 21 deletions(-) create mode 100644 pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/framework/fake/listers.go diff --git a/pkg/clustertree/cluster-manager/controllers/pv/leaf_pv_controller.go b/pkg/clustertree/cluster-manager/controllers/pv/leaf_pv_controller.go index 0723515bc..b31b9dbec 100644 --- a/pkg/clustertree/cluster-manager/controllers/pv/leaf_pv_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/pv/leaf_pv_controller.go @@ -211,13 +211,13 @@ func filterPV(pv *v1.PersistentVolume, nodeName string) { mfs := v.MatchFields mes := v.MatchExpressions for k, val := range v.MatchFields { - if val.Key == utils.NodeHostnameValue || val.Key == utils.NodeHostnameValueBeta { + if val.Key == utils.NodeHostnameValue || val.Key == utils.NodeHostnameValueBeta || val.Key == utils.OpenebsPVNodeLabel { val.Values = []string{nodeName} } mfs[k] = val } for k, val := range v.MatchExpressions { - if val.Key == utils.NodeHostnameValue || val.Key == utils.NodeHostnameValueBeta { + if val.Key == utils.NodeHostnameValue || val.Key == utils.NodeHostnameValueBeta || val.Key == utils.OpenebsPVNodeLabel { val.Values = []string{nodeName} } mes[k] = val diff --git a/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go b/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go index 158d94d2c..42c2be322 100644 --- a/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go +++ b/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go @@ -205,7 +205,7 @@ func updateTaints(client kubernetes.Interface, taints []corev1.Taint, nodeName s return nil } -func createNode(ctx context.Context, clientset kubernetes.Interface, clusterName, nodeName, gitVersion string, listenPort int32) (*corev1.Node, error) { +func createNode(ctx context.Context, clientset kubernetes.Interface, clusterName, nodeName, gitVersion string, listenPort int32, leafModel LeafMode) (*corev1.Node, error) { nodeInRoot, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { @@ -217,6 +217,9 @@ func createNode(ctx context.Context, clientset kubernetes.Interface, clusterName if nodeAnnotations == nil { nodeAnnotations = make(map[string]string, 1) } + if leafModel == ALL { + nodeAnnotations[nodeMode] = "one2cluster" + } nodeAnnotations[utils.KosmosNodeOwnedByClusterAnnotations] = clusterName nodeInRoot.SetAnnotations(nodeAnnotations) @@ -243,8 +246,7 @@ func (h ClassificationHandler) CreateRootNode(ctx context.Context, listenPort in if h.leafMode == ALL { nodeNameInRoot := fmt.Sprintf("%s%s", utils.KosmosNodePrefix, cluster.Name) - nodeInRoot, err := createNode(ctx, h.RootClientset, cluster.Name, nodeNameInRoot, gitVersion, listenPort) - nodeInRoot.Annotations[nodeMode] = "one2cluster" + nodeInRoot, err := createNode(ctx, h.RootClientset, cluster.Name, 
nodeNameInRoot, gitVersion, listenPort, h.leafMode) if err != nil { return nil, nil, err } @@ -262,7 +264,7 @@ func (h ClassificationHandler) CreateRootNode(ctx context.Context, listenPort in nodeNameInRoot = nodeNameInRoot[:63] } - nodeInRoot, err := createNode(ctx, h.RootClientset, cluster.Name, nodeNameInRoot, gitVersion, listenPort) + nodeInRoot, err := createNode(ctx, h.RootClientset, cluster.Name, nodeNameInRoot, gitVersion, listenPort, h.leafMode) if err != nil { return nil, nil, err } diff --git a/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go index 9c7b09223..692ab8455 100644 --- a/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go +++ b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go @@ -24,20 +24,29 @@ import ( "context" "errors" "fmt" + "sort" "sync" "time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" corelisters "k8s.io/client-go/listers/core/v1" + storagelisters "k8s.io/client-go/listers/storage/v1" + corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/component-helpers/storage/ephemeral" + "k8s.io/component-helpers/storage/volume" "k8s.io/klog/v2" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/scheduler/framework" scheduling "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics" "github.com/kosmos.io/kosmos/pkg/apis/config" "github.com/kosmos.io/kosmos/pkg/scheduler/lifted/helpers" + "github.com/kosmos.io/kosmos/pkg/utils" ) const ( @@ -70,8 +79,11 @@ func (d *stateData) Clone() framework.StateData { type VolumeBinding struct { Binder scheduling.SchedulerVolumeBinder PVCLister corelisters.PersistentVolumeClaimLister + PVLister corelisters.PersistentVolumeLister NodeLister corelisters.NodeLister + SCLister storagelisters.StorageClassLister frameworkHandler framework.Handle + pvCache scheduling.PVAssumeCache } var _ framework.PreFilterPlugin = &VolumeBinding{} @@ -204,23 +216,35 @@ func (pl *VolumeBinding) Filter(_ context.Context, cs *framework.CycleState, pod return framework.AsStatus(err) } - if helpers.HasLeafNodeTaint(node) { + if state.skip { + return nil + } + + podVolumes := &scheduling.PodVolumes{} + reasons := scheduling.ConflictReasons{} + isKosmosClusterNode := false + + if utils.HasKosmosNodeLabel(node) { + // the node is a kosmos node if cluster, ok := node.Annotations[nodeMode]; ok && cluster == "one2cluster" { - klog.V(5).InfoS("This is one2cluster ", "pod", klog.KObj(pod), "node", klog.KObj(node)) - return nil + // it is in "one2cluster" mode + isKosmosClusterNode = true + _, reasons, err = pl.FindPodKosmosVolumes(pod, state.boundClaims, state.claimsToBind, node) } else { + // it is in "one2one" mode if len(state.boundClaims) <= 0 { + // the pod is newly created return nil + } else { + // the pod has been created and is now being restarted. 
+ podVolumes, reasons, err = pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node) } } + } else { + // the node is real node + podVolumes, reasons, err = pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node) } - if state.skip { - return nil - } - - podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node) - if err != nil { return framework.AsStatus(err) } @@ -233,13 +257,309 @@ func (pl *VolumeBinding) Filter(_ context.Context, cs *framework.CycleState, pod return status } - // multiple goroutines call `Filter` on different nodes simultaneously and the `CycleState` may be duplicated, so we must use a local lock here - state.Lock() - state.podVolumesByNode[node.Name] = podVolumes - state.Unlock() + // this operation is not required for kosmos node + if !isKosmosClusterNode { + // multiple goroutines call `Filter` on different nodes simultaneously and the `CycleState` may be duplicated, so we must use a local lock here + state.Lock() + state.podVolumesByNode[node.Name] = podVolumes + state.Unlock() + } + return nil } +// FindPodKosmosVolumes finds the matching PVs for PVCs and kosmos nodes to provision PVs +// for the given pod and node. If the kosmos node does not fit, confilict reasons are +// returned. +func (pl *VolumeBinding) FindPodKosmosVolumes(pod *corev1.Pod, boundClaims, claimsToBind []*corev1.PersistentVolumeClaim, node *corev1.Node) (podVolumes *PodVolumes, reasons scheduling.ConflictReasons, err error) { + podVolumes = &PodVolumes{} + + // Warning: Below log needs high verbosity as it can be printed several times (#60933). + klog.V(5).InfoS("FindPodKosmosVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node)) + + // Initialize to true for pods that don't have volumes. These + // booleans get translated into reason strings when the function + // returns without an error. + unboundVolumesSatisfied := true + boundVolumesSatisfied := true + sufficientStorage := true + boundPVsFound := true + defer func() { + if err != nil { + return + } + if !boundVolumesSatisfied { + reasons = append(reasons, scheduling.ErrReasonNodeConflict) + } + if !unboundVolumesSatisfied { + reasons = append(reasons, scheduling.ErrReasonBindConflict) + } + if !sufficientStorage { + reasons = append(reasons, scheduling.ErrReasonNotEnoughSpace) + } + if !boundPVsFound { + reasons = append(reasons, scheduling.ErrReasonPVNotExist) + } + }() + + defer func() { + if err != nil { + metrics.VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() + } + }() + + var ( + staticBindings []*BindingInfo + dynamicProvisions []*corev1.PersistentVolumeClaim + ) + defer func() { + // Although we do not distinguish nil from empty in this function, for + // easier testing, we normalize empty to nil. 
+ if len(staticBindings) == 0 { + staticBindings = nil + } + if len(dynamicProvisions) == 0 { + dynamicProvisions = nil + } + podVolumes.StaticBindings = staticBindings + podVolumes.DynamicProvisions = dynamicProvisions + }() + + // Check PV node affinity on bound volumes + if len(boundClaims) > 0 { + boundVolumesSatisfied, boundPVsFound, err = pl.checkBoundClaims(boundClaims, node, pod) + if err != nil { + return + } + } + + // Find matching volumes and node for unbound claims + if len(claimsToBind) > 0 { + var ( + claimsToFindMatching []*corev1.PersistentVolumeClaim + claimsToProvision []*corev1.PersistentVolumeClaim + ) + + // Filter out claims to provision + for _, claim := range claimsToBind { + if clusterOwners, ok := claim.Annotations[utils.KosmosResourceOwnersAnnotations]; ok { + if clusterOwners != node.Annotations[utils.KosmosNodeOwnedByClusterAnnotations] { + // Fast path, skip unmatched node. + unboundVolumesSatisfied = false + return + } + claimsToProvision = append(claimsToProvision, claim) + } else { + claimsToFindMatching = append(claimsToFindMatching, claim) + } + } + + // Find matching volumes + if len(claimsToFindMatching) > 0 { + var unboundClaims []*corev1.PersistentVolumeClaim + unboundVolumesSatisfied, staticBindings, unboundClaims, err = pl.findMatchingVolumes(pod, claimsToFindMatching, node) + if err != nil { + return + } + claimsToProvision = append(claimsToProvision, unboundClaims...) + } + + // Check for claims to provision. This is the first time where we potentially + // find out that storage is not sufficient for the node. + if len(claimsToProvision) > 0 { + unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = pl.checkVolumeProvisions(pod, claimsToProvision, node) + if err != nil { + return + } + } + } + + return +} + +// checkVolumeProvisions checks given unbound claims (the claims have gone through func +// findMatchingVolumes, and do not have matching volumes for binding), and return true +// if all the claims are eligible for dynamic provision. +func (pl *VolumeBinding) checkVolumeProvisions(pod *corev1.Pod, claimsToProvision []*corev1.PersistentVolumeClaim, node *corev1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*corev1.PersistentVolumeClaim, err error) { + dynamicProvisions = []*corev1.PersistentVolumeClaim{} + + // We return early with provisionedClaims == nil if a check + // fails or we encounter an error. 
+ for _, claim := range claimsToProvision { + pvcName := getPVCName(claim) + className := volume.GetPersistentVolumeClaimClass(claim) + if className == "" { + return false, false, nil, fmt.Errorf("no class for claim %q", pvcName) + } + + class, err := pl.SCLister.Get(className) + if err != nil { + return false, false, nil, fmt.Errorf("failed to find storage class %q", className) + } + provisioner := class.Provisioner + if provisioner == "" || provisioner == volume.NotSupportedProvisioner { + klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim)) + return false, true, nil, nil + } + + // Check if the node can satisfy the topology requirement in the class + if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { + klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim)) + return false, true, nil, nil + } + + dynamicProvisions = append(dynamicProvisions, claim) + + } + klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node)) + + return true, true, dynamicProvisions, nil +} + +func getPVCName(pvc *corev1.PersistentVolumeClaim) string { + return pvc.Namespace + "/" + pvc.Name +} + +// findMatchingVolumes tries to find matching volumes for given claims, +// and return unbound claims for further provision. +func (pl *VolumeBinding) findMatchingVolumes(pod *corev1.Pod, claimsToBind []*corev1.PersistentVolumeClaim, node *corev1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*corev1.PersistentVolumeClaim, err error) { + // Sort all the claims by increasing size request to get the smallest fits + sort.Sort(byPVCSize(claimsToBind)) + + chosenPVs := map[string]*corev1.PersistentVolume{} + + foundMatches = true + + for _, pvc := range claimsToBind { + // Get storage class name from each PVC + storageClassName := volume.GetPersistentVolumeClaimClass(pvc) + allPVs := pl.pvCache.ListPVs(storageClassName) + + // Find a matching PV + pv, err := volume.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true) + if err != nil { + return false, nil, nil, err + } + if pv == nil { + klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node)) + unboundClaims = append(unboundClaims, pvc) + foundMatches = false + continue + } + + // matching PV needs to be excluded so we don't select it again + chosenPVs[pv.Name] = pv + bindings = append(bindings, &BindingInfo{pvc: pvc, pv: pv}) + klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod)) + } + + if foundMatches { + klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node)) + } + + return +} + +// checkBoundClaims Check whether the kosmos cluster owner annotation of the bound pvc/pvc matches the owner annotation of the node +func (pl *VolumeBinding) checkBoundClaims(claims []*corev1.PersistentVolumeClaim, node *corev1.Node, pod *corev1.Pod) (bool, bool, error) { + for _, pvc := range claims { + pvName := pvc.Spec.VolumeName + pv, err := pl.PVLister.Get(pvName) + if err != nil { + if apierrors.IsNotFound(err) { + err = nil + } + return true, false, err + } + + // todo Verification of migrated pods is not currently supported. 
+ // translator.IsPVMigratable(pv) + + err = pl.checkKosmosResourceOwner(pv, node) + if err != nil { + klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err) + return false, true, nil + } + klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod)) + } + + klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node)) + return true, true, nil +} + +// checkKosmosResourceOwner looks at the PV node affinity, and checks if the kosmos node has the same corresponding labels +// This ensures that we don't mount a volume that doesn't belong to this node +func (pl *VolumeBinding) checkKosmosResourceOwner(pv *corev1.PersistentVolume, node *corev1.Node) error { + clusterOwners, ok := pv.Annotations[utils.KosmosResourceOwnersAnnotations] + if !ok { + // For pvc that has been bound to pv, but is not managed by kosmos + err := CheckNodeAffinity(pv, node.Labels) + return err + } + + if !(clusterOwners == node.Annotations[utils.KosmosNodeOwnedByClusterAnnotations]) { + klog.V(4).InfoS("This pv does not belong to the kosmos node", "node", klog.KObj(node), "pv", klog.KObj(pv)) + return fmt.Errorf("the owner cluster %s of the pv mismatch the owner cluster of %s this kosmos node", clusterOwners, node.Annotations[utils.KosmosNodeOwnedByClusterAnnotations]) + } + + return nil +} + +// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels +// This ensures that we don't mount a volume that doesn't belong to this node +func CheckNodeAffinity(pv *corev1.PersistentVolume, nodeLabels map[string]string) error { + if pv.Spec.NodeAffinity == nil { + return fmt.Errorf("node Affinity not specified") + } + + if pv.Spec.NodeAffinity.Required != nil { + node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}} + terms := pv.Spec.NodeAffinity.Required + if matches, err := corev1helpers.MatchNodeSelectorTerms(node, terms); err != nil { + return err + } else if !matches { + return fmt.Errorf("no matching NodeSelectorTerms") + } + } + + return nil +} + +type byPVCSize []*corev1.PersistentVolumeClaim + +func (a byPVCSize) Len() int { + return len(a) +} + +func (a byPVCSize) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a byPVCSize) Less(i, j int) bool { + iSize := a[i].Spec.Resources.Requests[corev1.ResourceStorage] + jSize := a[j].Spec.Resources.Requests[corev1.ResourceStorage] + // return true if iSize is less than jSize + return iSize.Cmp(jSize) == -1 +} + +// BindingInfo holds a binding between PV and PVC. +type BindingInfo struct { + // PVC that needs to be bound + pvc *corev1.PersistentVolumeClaim + + // Proposed PV to bind to this PVC + pv *corev1.PersistentVolume +} + +// PodVolumes holds pod's volumes information used in volume scheduling. +type PodVolumes struct { + // StaticBindings are binding decisions for PVCs which can be bound to + // pre-provisioned static PVs. + StaticBindings []*BindingInfo + // DynamicProvisions are PVCs that require dynamic provisioning + DynamicProvisions []*corev1.PersistentVolumeClaim +} + // Reserve reserves volumes of pod and saves binding status in cycle state. 
func (pl *VolumeBinding) Reserve(_ context.Context, cs *framework.CycleState, pod *corev1.Pod, nodeName string) *framework.Status { node, err := pl.NodeLister.Get(nodeName) @@ -355,6 +675,9 @@ func New(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { Binder: binder, PVCLister: pvcInformer.Lister(), NodeLister: nodeInformer.Lister(), + PVLister: pvInformer.Lister(), + SCLister: storageClassInformer.Lister(), frameworkHandler: fh, + pvCache: scheduling.NewPVAssumeCache(pvInformer.Informer()), }, nil } diff --git a/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding_test.go b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding_test.go new file mode 100644 index 000000000..bb6381994 --- /dev/null +++ b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding_test.go @@ -0,0 +1,849 @@ +package leafnodevolumebinding + +import ( + "context" + "errors" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/pkg/scheduler/framework" + fake2 "k8s.io/kubernetes/pkg/scheduler/framework/fake" + scheduling "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" +) + +func TestStateDataClone(t *testing.T) { + // initialize a statedata object + state := &stateData{ + skip: false, + boundClaims: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "claim1"}}}, + claimsToBind: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "claim2"}}}, + allBound: false, + } + + // Clone stateData + clonedState := state.Clone().(*stateData) + + // make sure the cloned data is correct + // Use the assert Equal method of unittest.Test Case to compare whether state.skip and cloned State.skip are equal. + // If not equal, an AssertionError is thrown with the message "skip should be the same in cloned state" + assert.Equal(t, state.skip, clonedState.skip, "skip should be the same in cloned state") + // Use the assert Equal method of unittest.Test Case to compare whether the lengths of state.bound Claims and cloned State.bound Claims are equal. + // If not equal, an AssertionError is thrown with the message "boundClaims length should match" + assert.Equal(t, len(state.boundClaims), len(clonedState.boundClaims), "boundClaims length should match") + // Use the assert Equal method of unittest.Test Case to compare whether state.bound Claims[0].Name and cloned State.bound Claims[0].Name are equal. + // If not equal, an AssertionError is thrown with the message "boundClaims names should be equal" + assert.Equal(t, state.boundClaims[0].Name, clonedState.boundClaims[0].Name, "boundClaims names should be equal") + // Use the assert Equal method of unittest.Test Case to compare whether the lengths of state.claims To Bind and cloned State.claims To Bind are equal. + // If not equal, an AssertionError is thrown with the message "claimsToBind length should match" + assert.Equal(t, len(state.claimsToBind), len(clonedState.claimsToBind), "claimsToBind length should match") + // Use the assert Equal method of unittest.Test Case to compare whether state.claims To Bind[0].Name and cloned State.claims To Bind[0].Name are equal. 
+ // If not equal, an AssertionError is thrown with the message "claimsToBind names should be equal" + assert.Equal(t, state.claimsToBind[0].Name, clonedState.claimsToBind[0].Name, "claimsToBind names should be equal") + // Use the assert Equal method of unittest.Test Case to compare whether state.all Bound and cloned State.all Bound are equal. + // If not equal, an AssertionError is thrown with the message "allBound should be the same in cloned state" + assert.Equal(t, state.allBound, clonedState.allBound, "allBound should be the same in cloned state") +} + +func TestVolumeBinding_Name(t *testing.T) { + // create a volumebinding instance + volumeBinding := &VolumeBinding{} + + // Check that the name returned by the Name method is correct + assert.Equal(t, "LeafNodeVolumeBinding", volumeBinding.Name(), "plugin name should be 'LeafNodeVolumeBinding'") +} + +func TestVolumeBinding_ImplementInterfaces(t *testing.T) { + // create a volumebinding instance + volumeBinding := &VolumeBinding{} + + // Verify whether VolumeBinding implements the PreFilterPlugin interface + var _ framework.PreFilterPlugin = volumeBinding + + // Verify whether VolumeBinding implements the FilterPlugin interface + var _ framework.FilterPlugin = volumeBinding + + // Verify whether VolumeBinding implements the ReservePlugin interface + var _ framework.ReservePlugin = volumeBinding + + // Verify whether VolumeBinding implements the PreBindPlugin interface + var _ framework.PreBindPlugin = volumeBinding +} + +type FakePVCInterface struct { + client kubernetes.Interface +} + +func (f *FakePVCInterface) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister { + return &FakePVCNamespaceLister{ + client: f.client, + namespace: namespace, + } +} + +func (f *FakePVCInterface) List(selector labels.Selector) ([]*v1.PersistentVolumeClaim, error) { + // Simulate returning a list of PVCs, this should return all PVCs in the namespace + pvcList := []*v1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc2", + }, + }, + } + return pvcList, nil +} + +type FakePVCNamespaceLister struct { + client kubernetes.Interface + namespace string +} + +func (f *FakePVCNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + // Return the mock PVC + return &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, nil +} + +func (f *FakePVCNamespaceLister) List(selector labels.Selector) ([]*v1.PersistentVolumeClaim, error) { + // Simulate returning a list of PVCs for the namespace + pvcList := []*v1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc2", + }, + }, + } + return pvcList, nil +} + +func TestVolumeBinding_podHasPVCs(t *testing.T) { + tests := []struct { + name string + pod *v1.Pod + VolumeBinding *VolumeBinding + expectedBool bool + expectedErr error + }{ + { + name: "pod has no PVC, pvc count is 0", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-no-pvc", + }, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{{}}, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: false, + expectedErr: nil, + }, + { + name: "pod has no PVC, pvc count is 1", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-no-pvc", + }, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: 
fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: false, + expectedErr: nil, + }, + { + name: "pod have pvc, pvc count is 0", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-have-pvc", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-test", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-test", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{}, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: errors.New("persistentvolumeclaim \"pvc-test\" not found"), + }, + { + name: "pod have pvc, pvc is found", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-pvc", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-found", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-found", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-found", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: nil, + }, + { + name: "pvc is Ephemeral, but namespace is not equal", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-a", + VolumeSource: v1.VolumeSource{ + Ephemeral: &v1.EphemeralVolumeSource{ + VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a-pvc-a", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-b", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: errors.New("PVC /pod-a-pvc-a was not created for pod /pod-a (pod is not owner)"), + }, + { + name: "pvc is Ephemeral, but namespace is equal", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a", + Namespace: "test", + UID: "a", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-a", + VolumeSource: v1.VolumeSource{ + Ephemeral: &v1.EphemeralVolumeSource{ + VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a-pvc-a", + Namespace: "test", + UID: "a", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "a", + Controller: func(b bool) *bool { return &b }(true), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-b", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: nil, + }, + { + name: "pvc is lost", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + 
ClaimName: "pvc", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimLost, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-b", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: errors.New("persistentvolumeclaim \"pvc\" bound to non-existent persistentvolume \"\""), + }, + { + name: "pvc is DeletionTimestamp is not nil", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + DeletionTimestamp: func(time metav1.Time) *metav1.Time { return &time }(metav1.Now()), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-b", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedBool: true, + expectedErr: errors.New("persistentvolumeclaim \"pvc\" is being deleted"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Execute PreFilter + bool, err := tt.VolumeBinding.podHasPVCs(tt.pod) + + // Validate result + assert.Equal(t, tt.expectedBool, bool) + assert.Equal(t, tt.expectedErr, err) + }) + } +} + +func TestPreFilter(t *testing.T) { + tests := []struct { + name string + pod *v1.Pod + VolumeBinding *VolumeBinding + expectedStatus *framework.Status + }{ + { + name: "pod has no PVCs", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-no-pvc", + }, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedStatus: nil, + }, + { + name: "pod have pvc", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-have-pvc", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-test", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-test", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-test", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedStatus: nil, + }, + { + name: "pod have pvc, but not found", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-have-not-found-pvc", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-not-found", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-not-found", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "persistentvolumeclaim \"pvc-not-found\" not found"), + }, + { + name: "pod have pvc, but pvc have two", + 
pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-a-pvc", + }, + Spec: v1.PodSpec{Volumes: []v1.Volume{ + { + Name: "pvc-a", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-a", + }, + }, + }, + }}, + }, + VolumeBinding: &VolumeBinding{ + PVCLister: fake2.PersistentVolumeClaimLister{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-a", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-b", + }, + }, + }, + Binder: &scheduling.FakeVolumeBinder{ + AssumeCalled: false, + BindCalled: false, + }, + }, + expectedStatus: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + cycleState := framework.NewCycleState() + + // Execute PreFilter + _, status := tt.VolumeBinding.PreFilter(ctx, cycleState, tt.pod) + + // Validate status + assert.Equal(t, tt.expectedStatus, status) + }) + } +} + +func GetPVCName(pvc *v1.PersistentVolumeClaim) string { + return pvc.Namespace + "/" + pvc.Name +} +func TestGetPVCName(t *testing.T) { + tests := []struct { + name string + pvc *v1.PersistentVolumeClaim + expectedResult string + }{ + { + name: "test1", + pvc: &v1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, + UID: "1234567890", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Finalizers: []string{"kubernetes"}, + }, + Spec: v1.PersistentVolumeClaimSpec{}, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimLost, + }, + }, + expectedResult: "default/test1", + }, + { + name: "test1", + pvc: &v1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + Namespace: "kube-system", + Labels: map[string]string{}, + Annotations: map[string]string{}, + UID: "1234567890", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Finalizers: []string{"kubernetes"}, + }, + Spec: v1.PersistentVolumeClaimSpec{}, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimLost, + }, + }, + expectedResult: "kube-system/test2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Execute PreFilter + result := getPVCName(tt.pvc) + + // Validate status + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestCheckNodeAffinity(t *testing.T) { + tests := []struct { + name string + pv *v1.PersistentVolume + nodeLabels map[string]string + wantErr bool + }{ + { + name: "NodeAffinity not specified", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + wantErr: true, + }, + { + name: "No matching NodeSelectorTerms", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + NodeAffinity: &v1.VolumeNodeAffinity{ + Required: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "key", + Operator: v1.NodeSelectorOpExists, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Matching NodeSelectorTerms", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + NodeAffinity: &v1.VolumeNodeAffinity{ + Required: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "key", + Operator: v1.NodeSelectorOpIn, + Values: []string{"value"}, + }, + }, + }, + }, + }, + }, 
+ }, + }, + nodeLabels: map[string]string{ + "key": "value", + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := CheckNodeAffinity(tt.pv, tt.nodeLabels) + if (err != nil) != tt.wantErr { + t.Errorf("CheckNodeAffinity() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestByPVCSize_Len(t *testing.T) { + // test the length of the pvc list + pvcList := byPVCSize{ + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + } + + // verify len method + assert.Equal(t, 2, pvcList.Len()) +} + +func TestByPVCSize_Swap(t *testing.T) { + // test the swap method + pvcList := byPVCSize{ + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + } + + // verify the order before swap method + assert.Equal(t, "pvc1", pvcList[0].Name) + assert.Equal(t, "pvc2", pvcList[1].Name) + + // execute swap + pvcList.Swap(0, 1) + + // verify the order after the swap method + assert.Equal(t, "pvc2", pvcList[0].Name) + assert.Equal(t, "pvc1", pvcList[1].Name) +} + +func TestByPVCSize_Less(t *testing.T) { + // test the less method + pvcList := byPVCSize{ + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + } + + // Verify Less method, should return true because "1Gi" < "2Gi" + assert.True(t, pvcList.Less(1, 0)) +} + +func TestByPVCSize_Sort(t *testing.T) { + // test sorting the pvc list + pvcList := byPVCSize{ + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + } + + // perform sorting + sort.Sort(pvcList) + + // verify sorting results + assert.Equal(t, "pvc2", pvcList[0].Name) + assert.Equal(t, "pvc1", pvcList[1].Name) +} 
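Editorial note (not part of the patch): findMatchingVolumes sorts the unbound claims by ascending storage request before calling volume.FindMatchingVolume, so the smallest claims are matched to fitting PVs first. Below is a minimal, self-contained sketch of that ordering, mirroring the byPVCSize comparator introduced above; the claim names and sizes are made up for illustration.

    package main

    import (
    	"fmt"
    	"sort"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // byPVCSize mirrors the comparator added in leafnode_volume_binding.go:
    // claims are ordered by ascending storage request.
    type byPVCSize []*corev1.PersistentVolumeClaim

    func (a byPVCSize) Len() int      { return len(a) }
    func (a byPVCSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
    func (a byPVCSize) Less(i, j int) bool {
    	iSize := a[i].Spec.Resources.Requests[corev1.ResourceStorage]
    	jSize := a[j].Spec.Resources.Requests[corev1.ResourceStorage]
    	return iSize.Cmp(jSize) == -1
    }

    // pvc builds an illustrative claim with the given storage request.
    func pvc(name, size string) *corev1.PersistentVolumeClaim {
    	return &corev1.PersistentVolumeClaim{
    		ObjectMeta: metav1.ObjectMeta{Name: name},
    		Spec: corev1.PersistentVolumeClaimSpec{
    			Resources: corev1.ResourceRequirements{
    				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(size)},
    			},
    		},
    	}
    }

    func main() {
    	claims := byPVCSize{pvc("data", "10Gi"), pvc("logs", "1Gi"), pvc("cache", "5Gi")}
    	sort.Sort(claims)
    	for _, c := range claims {
    		fmt.Println(c.Name) // prints: logs, cache, data — smallest request first
    	}
    }

Sorting smallest-first keeps the larger PVs available for the claims that actually need them, the same smallest-fit heuristic the upstream volumebinding plugin uses.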
diff --git a/pkg/utils/constants.go b/pkg/utils/constants.go index 8fdfcacd7..cf3e06a10 100644 --- a/pkg/utils/constants.go +++ b/pkg/utils/constants.go @@ -131,6 +131,7 @@ const ( NodeOSLabelBeta = "beta.kubernetes.io/os" NodeHostnameValue = corev1.LabelHostname NodeHostnameValueBeta = "beta.kubernetes.io/hostname" + OpenebsPVNodeLabel = "openebs.io/nodename" NodeOSLabelStable = corev1.LabelOSStable NodeArchLabelStable = corev1.LabelArchStable PVCSelectedNodeKey = "volume.kubernetes.io/selected-node" diff --git a/pkg/utils/pvpvc.go b/pkg/utils/pvpvc.go index 9ceaebbf4..619564745 100644 --- a/pkg/utils/pvpvc.go +++ b/pkg/utils/pvpvc.go @@ -27,12 +27,12 @@ func NodeAffinity4RootPV(pv *v1.PersistentVolume, isOne2OneMode bool, clusterNam if isOne2OneMode { for _, v := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { for _, val := range v.MatchFields { - if val.Key == NodeHostnameValue || val.Key == NodeHostnameValueBeta { + if val.Key == NodeHostnameValue || val.Key == NodeHostnameValueBeta || val.Key == OpenebsPVNodeLabel { node4RootPV = val.Values[0] } } for _, val := range v.MatchExpressions { - if val.Key == NodeHostnameValue || val.Key == NodeHostnameValueBeta { + if val.Key == NodeHostnameValue || val.Key == NodeHostnameValueBeta || val.Key == OpenebsPVNodeLabel { node4RootPV = val.Values[0] } } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 8a5d47b25..71b0a17df 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -66,3 +66,11 @@ func FormatCIDR(cidr string) (string, error) { } return ipNet.String(), nil } + +func HasKosmosNodeLabel(node *corev1.Node) bool { + if kosmosNodeLabel, ok := node.Labels[KosmosNodeLabel]; ok && kosmosNodeLabel == KosmosNodeValue { + return true + } + + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/fake/listers.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/fake/listers.go new file mode 100644 index 000000000..61c8dfc34 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/fake/listers.go @@ -0,0 +1,320 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + storagelisters "k8s.io/client-go/listers/storage/v1" + "k8s.io/kubernetes/pkg/scheduler/framework" +) + +var _ corelisters.ServiceLister = &ServiceLister{} + +// ServiceLister implements ServiceLister on []v1.Service for test purposes. +type ServiceLister []*v1.Service + +// Services returns nil. 
+func (f ServiceLister) Services(namespace string) corelisters.ServiceNamespaceLister { + var services []*v1.Service + for i := range f { + if f[i].Namespace == namespace { + services = append(services, f[i]) + } + } + return &serviceNamespaceLister{ + services: services, + namespace: namespace, + } +} + +// List returns v1.ServiceList, the list of all services. +func (f ServiceLister) List(labels.Selector) ([]*v1.Service, error) { + return f, nil +} + +// serviceNamespaceLister is implementation of ServiceNamespaceLister returned by Services() above. +type serviceNamespaceLister struct { + services []*v1.Service + namespace string +} + +func (f *serviceNamespaceLister) Get(name string) (*v1.Service, error) { + return nil, fmt.Errorf("not implemented") +} + +func (f *serviceNamespaceLister) List(selector labels.Selector) ([]*v1.Service, error) { + return f.services, nil +} + +var _ corelisters.ReplicationControllerLister = &ControllerLister{} + +// ControllerLister implements ControllerLister on []v1.ReplicationController for test purposes. +type ControllerLister []*v1.ReplicationController + +// List returns []v1.ReplicationController, the list of all ReplicationControllers. +func (f ControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) { + return f, nil +} + +// GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod +func (f ControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) { + var selector labels.Selector + + for i := range f { + controller := f[i] + if controller.Namespace != pod.Namespace { + continue + } + selector = labels.Set(controller.Spec.Selector).AsSelectorPreValidated() + if selector.Matches(labels.Set(pod.Labels)) { + controllers = append(controllers, controller) + } + } + if len(controllers) == 0 { + err = fmt.Errorf("could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return +} + +// ReplicationControllers returns nil +func (f ControllerLister) ReplicationControllers(namespace string) corelisters.ReplicationControllerNamespaceLister { + return nil +} + +var _ appslisters.ReplicaSetLister = &ReplicaSetLister{} + +// ReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes. +type ReplicaSetLister []*appsv1.ReplicaSet + +// List returns replica sets. 
+func (f ReplicaSetLister) List(labels.Selector) ([]*appsv1.ReplicaSet, error) { + return f, nil +} + +// GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod +func (f ReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*appsv1.ReplicaSet, err error) { + var selector labels.Selector + + for _, rs := range f { + if rs.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the pod + continue + } + + if selector.Matches(labels.Set(pod.Labels)) { + rss = append(rss, rs) + } + } + if len(rss) == 0 { + err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return +} + +// ReplicaSets returns nil +func (f ReplicaSetLister) ReplicaSets(namespace string) appslisters.ReplicaSetNamespaceLister { + return nil +} + +var _ appslisters.StatefulSetLister = &StatefulSetLister{} + +// StatefulSetLister implements ControllerLister on []appsv1.StatefulSet for testing purposes. +type StatefulSetLister []*appsv1.StatefulSet + +// List returns stateful sets. +func (f StatefulSetLister) List(labels.Selector) ([]*appsv1.StatefulSet, error) { + return f, nil +} + +// GetPodStatefulSets gets the StatefulSets that have the selector that match the labels on the given pod. +func (f StatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*appsv1.StatefulSet, err error) { + var selector labels.Selector + + for _, ss := range f { + if ss.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ss.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the pod + continue + } + if selector.Matches(labels.Set(pod.Labels)) { + sss = append(sss, ss) + } + } + if len(sss) == 0 { + err = fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// StatefulSets returns nil +func (f StatefulSetLister) StatefulSets(namespace string) appslisters.StatefulSetNamespaceLister { + return nil +} + +// persistentVolumeClaimNamespaceLister is implementation of PersistentVolumeClaimNamespaceLister returned by List() above. +type persistentVolumeClaimNamespaceLister struct { + pvcs []*v1.PersistentVolumeClaim + namespace string +} + +func (f *persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + for _, pvc := range f.pvcs { + if pvc.Name == name && pvc.Namespace == f.namespace { + return pvc, nil + } + } + return nil, fmt.Errorf("persistentvolumeclaim %q not found", name) +} + +func (f persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + return nil, fmt.Errorf("not implemented") +} + +// PersistentVolumeClaimLister declares a []v1.PersistentVolumeClaim type for testing. +type PersistentVolumeClaimLister []v1.PersistentVolumeClaim + +var _ corelisters.PersistentVolumeClaimLister = PersistentVolumeClaimLister{} + +// List gets PVC matching the namespace and PVC ID. +func (pvcs PersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + return nil, fmt.Errorf("not implemented") +} + +// PersistentVolumeClaims returns a fake PersistentVolumeClaimLister object. 
+func (pvcs PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister { + ps := make([]*v1.PersistentVolumeClaim, len(pvcs)) + for i := range pvcs { + ps[i] = &pvcs[i] + } + return &persistentVolumeClaimNamespaceLister{ + pvcs: ps, + namespace: namespace, + } +} + +// NodeInfoLister declares a framework.NodeInfo type for testing. +type NodeInfoLister []*framework.NodeInfo + +// Get returns a fake node object in the fake nodes. +func (nodes NodeInfoLister) Get(nodeName string) (*framework.NodeInfo, error) { + for _, node := range nodes { + if node != nil && node.Node().Name == nodeName { + return node, nil + } + } + return nil, fmt.Errorf("unable to find node: %s", nodeName) +} + +// List lists all nodes. +func (nodes NodeInfoLister) List() ([]*framework.NodeInfo, error) { + return nodes, nil +} + +// HavePodsWithAffinityList is supposed to list nodes with at least one pod with affinity. For the fake lister +// we just return everything. +func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) { + return nodes, nil +} + +// HavePodsWithRequiredAntiAffinityList is supposed to list nodes with at least one pod with +// required anti-affinity. For the fake lister we just return everything. +func (nodes NodeInfoLister) HavePodsWithRequiredAntiAffinityList() ([]*framework.NodeInfo, error) { + return nodes, nil +} + +var _ storagelisters.CSINodeLister = CSINodeLister{} + +// CSINodeLister declares a storagev1.CSINode type for testing. +type CSINodeLister []storagev1.CSINode + +// Get returns a fake CSINode object. +func (n CSINodeLister) Get(name string) (*storagev1.CSINode, error) { + for _, cn := range n { + if cn.Name == name { + return &cn, nil + } + } + return nil, fmt.Errorf("csiNode %q not found", name) +} + +// List lists all CSINodes in the indexer. +func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1.CSINode, err error) { + return nil, fmt.Errorf("not implemented") +} + +// PersistentVolumeLister declares a []v1.PersistentVolume type for testing. +type PersistentVolumeLister []v1.PersistentVolume + +var _ corelisters.PersistentVolumeLister = PersistentVolumeLister{} + +// Get returns a fake PV object in the fake PVs by PV ID. +func (pvs PersistentVolumeLister) Get(pvID string) (*v1.PersistentVolume, error) { + for _, pv := range pvs { + if pv.Name == pvID { + return &pv, nil + } + } + return nil, fmt.Errorf("unable to find persistent volume: %s", pvID) +} + +// List lists all PersistentVolumes in the indexer. +func (pvs PersistentVolumeLister) List(selector labels.Selector) ([]*v1.PersistentVolume, error) { + return nil, fmt.Errorf("not implemented") +} + +// StorageClassLister declares a []storagev1.StorageClass type for testing. +type StorageClassLister []storagev1.StorageClass + +var _ storagelisters.StorageClassLister = StorageClassLister{} + +// Get returns a fake storage class object in the fake storage classes by name. +func (classes StorageClassLister) Get(name string) (*storagev1.StorageClass, error) { + for _, sc := range classes { + if sc.Name == name { + return &sc, nil + } + } + return nil, &errors.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + Message: fmt.Sprintf("unable to find storage class: %s", name), + }, + } +} + +// List lists all StorageClass in the indexer. 
+func (classes StorageClassLister) List(selector labels.Selector) ([]*storagev1.StorageClass, error) { + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a99a08002..4798f6570 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1799,6 +1799,7 @@ k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2 k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3 k8s.io/kubernetes/pkg/scheduler/apis/config/validation k8s.io/kubernetes/pkg/scheduler/framework +k8s.io/kubernetes/pkg/scheduler/framework/fake k8s.io/kubernetes/pkg/scheduler/framework/parallelize k8s.io/kubernetes/pkg/scheduler/framework/plugins k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder
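Editorial note (not part of the patch): the heart of this fix is that a PV or PVC managed by kosmos carries an owner annotation naming its leaf cluster, and the scheduler only lets it bind on the kosmos node owned by that same cluster (checkKosmosResourceOwner and the claimsToBind fast path above). Below is a standalone sketch of that comparison; the annotation keys are placeholders standing in for utils.KosmosResourceOwnersAnnotations and utils.KosmosNodeOwnedByClusterAnnotations, whose actual string values live in pkg/utils.

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Illustrative stand-ins for the real annotation keys defined in pkg/utils;
    // the values below are assumptions, not the project's actual constants.
    const (
    	resourceOwnersAnnotation = "kosmos.io/cluster-owners"   // assumed value
    	ownedByClusterAnnotation = "kosmos.io/owned-by-cluster" // assumed value
    )

    // ownerMatches sketches the idea behind checkKosmosResourceOwner: a PV that is
    // managed by kosmos may only be mounted on a kosmos node owned by the same
    // leaf cluster.
    func ownerMatches(pv *corev1.PersistentVolume, node *corev1.Node) bool {
    	owners, ok := pv.Annotations[resourceOwnersAnnotation]
    	if !ok {
    		// Not a kosmos-managed PV; the plugin falls back to the ordinary
    		// node-affinity check instead.
    		return true
    	}
    	return owners == node.Annotations[ownedByClusterAnnotation]
    }

    func main() {
    	pv := &corev1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{
    		Name:        "pv-a",
    		Annotations: map[string]string{resourceOwnersAnnotation: "cluster-1"},
    	}}
    	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{
    		Name:        "kosmos-cluster-1",
    		Annotations: map[string]string{ownedByClusterAnnotation: "cluster-1"},
    	}}
    	fmt.Println(ownerMatches(pv, node)) // true: the same leaf cluster owns both
    }

PVs without the owner annotation go through CheckNodeAffinity instead, so volumes that are not managed by kosmos keep their usual node-affinity semantics.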