diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go index f6ce916c6bfe1..5ab477e1e0f03 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -45,7 +45,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics" - "k8s.io/kubernetes/pkg/volume/util" ) // ConflictReason is used for the special strings which explain why @@ -127,8 +126,6 @@ type InTreeToCSITranslator interface { // 1. The scheduler takes a Pod off the scheduler queue and processes it serially: // a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked // here, pod volume information will be saved in current scheduling cycle state for later use. -// If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce -// down the list of eligible nodes based on the bound PV's NodeAffinity (if any). // b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here. // c. Invokes all score plugins. Future/TBD // d. Selects the best node for the Pod. @@ -151,14 +148,6 @@ type SchedulerVolumeBinder interface { // unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) - // GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be - // potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used - // in subsequent scheduling stages. - // - // If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made - // and all nodes should be considered. - GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) - // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the // node and returns pod's volumes information. // @@ -381,55 +370,6 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume return } -// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be -// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used -// in subsequent scheduling stages. -// -// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes -// should be considered. -func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { - if len(boundClaims) == 0 { - return - } - - var errs []error - for _, pvc := range boundClaims { - pvName := pvc.Spec.VolumeName - pv, err := b.pvCache.GetPV(pvName) - if err != nil { - errs = append(errs, err) - continue - } - - // if the PersistentVolume is local and has node affinity matching specific node(s), - // add them to the eligible nodes - nodeNames := util.GetLocalPersistentVolumeNodeNames(pv) - if len(nodeNames) != 0 { - // on the first found list of eligible nodes for the local PersistentVolume, - // insert to the eligible node set. - if eligibleNodes == nil { - eligibleNodes = sets.New(nodeNames...) 
- } else { - // for subsequent finding of eligible nodes for the local PersistentVolume, - // take the intersection of the nodes with the existing eligible nodes - // for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2, - // then the eligible node list should be empty. - eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...)) - } - } - } - - if len(errs) > 0 { - logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs) - return nil - } - - if eligibleNodes != nil { - logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes) - } - return -} - // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's // volume information for the chosen node, and: // 1. Update the pvCache with the new prebound PV. diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go index 1746780ce2ebc..51b9f4c56040e 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "os" - "reflect" "sort" "testing" "time" @@ -32,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/informers" @@ -62,9 +60,6 @@ var ( boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass) immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass) immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass) - localPreboundPVC1a = makeTestPVC("local-prebound-pvc-1a", "1G", "", pvcPrebound, "local-pv-node1a", "1", &waitClass) - localPreboundPVC1b = makeTestPVC("local-prebound-pvc-1b", "1G", "", pvcPrebound, "local-pv-node1b", "1", &waitClass) - localPreboundPVC2a = makeTestPVC("local-prebound-pvc-2a", "1G", "", pvcPrebound, "local-pv-node2a", "1", &waitClass) // PVCs for dynamic provisioning provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner) @@ -96,9 +91,6 @@ var ( pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass) pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass) pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass) - localPVNode1a = makeLocalPV("local-pv-node1a", "node1", "5G", "1", nil, waitClass) - localPVNode1b = makeLocalPV("local-pv-node1b", "node1", "10G", "1", nil, waitClass) - localPVNode2a = makeLocalPV("local-pv-node2a", "node2", "5G", "1", nil, waitClass) // PVs for CSI migration migrationPVBound = makeTestPVForCSIMigration(zone1Labels, boundMigrationPVC, true) @@ -726,12 +718,6 @@ func makeTestPVForCSIMigration(labels map[string]string, pvc *v1.PersistentVolum return pv } -func makeLocalPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume { - pv := makeTestPV(name, node, capacity, version, boundToPVC, className) - pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key = v1.LabelHostname - return pv -} - func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, 
node string) *v1.PersistentVolumeClaim { newPVC := pvc.DeepCopy() metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnSelectedNode, node) @@ -2343,130 +2329,3 @@ func TestCapacity(t *testing.T) { }) } } - -func TestGetEligibleNodes(t *testing.T) { - type scenarioType struct { - // Inputs - pvcs []*v1.PersistentVolumeClaim - pvs []*v1.PersistentVolume - nodes []*v1.Node - - // Expected return values - eligibleNodes sets.Set[string] - } - - scenarios := map[string]scenarioType{ - "no-bound-claims": {}, - "no-nodes-found": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - preboundPVCNode1a, - }, - }, - "pv-not-found": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - preboundPVCNode1a, - }, - nodes: []*v1.Node{ - node1, - }, - }, - "node-affinity-mismatch": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - preboundPVCNode1a, - }, - pvs: []*v1.PersistentVolume{ - pvNode1a, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - }, - "local-pv-with-node-affinity": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New("node1"), - }, - "multi-local-pv-with-different-nodes": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - localPreboundPVC2a, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - localPVNode2a, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New[string](), - }, - "local-and-non-local-pv": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - preboundPVC, - immediateBoundPVC, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - pvNode1a, - pvBoundImmediate, - pvBoundImmediateNode2, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New("node1"), - }, - } - - run := func(t *testing.T, scenario scenarioType) { - logger, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Setup - testEnv := newTestBinder(t, ctx) - testEnv.initVolumes(scenario.pvs, scenario.pvs) - - testEnv.initNodes(scenario.nodes) - testEnv.initClaims(scenario.pvcs, scenario.pvcs) - - // Execute - eligibleNodes := testEnv.binder.GetEligibleNodes(logger, scenario.pvcs) - - // Validate - if reflect.DeepEqual(scenario.eligibleNodes, eligibleNodes) { - fmt.Println("foo") - } - - if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.Set[string]) bool { - return reflect.DeepEqual(a, b) - })); compDiff != "" { - t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff) - } - } - - for name, scenario := range scenarios { - t.Run(name, func(t *testing.T) { run(t, scenario) }) - } -} diff --git a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go index 667669c65b44c..f563c3c756372 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go @@ -20,7 +20,6 @@ import ( "context" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" ) @@ -55,11 +54,6 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVo return &PodVolumeClaims{}, nil } -// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes. 
-func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { - return nil -} - // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes. func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { return nil, b.config.FindReasons, b.config.FindErr diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index cb56ed9260782..2bb0fb56b6d7d 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -194,14 +194,6 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt status.AppendReason("pod has unbound immediate PersistentVolumeClaims") return nil, status } - // Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims. - var result *framework.PreFilterResult - if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil { - result = &framework.PreFilterResult{ - NodeNames: eligibleNodes, - } - } - state.Write(stateKey, &stateData{ podVolumesByNode: make(map[string]*PodVolumes), podVolumeClaims: &PodVolumeClaims{ @@ -210,7 +202,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding, }, }) - return result, nil + return nil, nil } // PreFilterExtensions returns prefilter extensions, pod add and remove. diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go index d383315d83095..709f23bdd8d98 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go @@ -27,7 +27,6 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2/ktesting" @@ -127,43 +126,6 @@ func TestVolumeBinding(t *testing.T) { }, wantPreScoreStatus: framework.NewStatus(framework.Skip), }, - { - name: "all bound with local volumes", - pod: makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod, - nodes: []*v1.Node{ - makeNode("node-a").Node, - }, - pvcs: []*v1.PersistentVolumeClaim{ - makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim, - makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim, - }, - pvs: []*v1.PersistentVolume{ - makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{ - v1.LabelHostname: {"node-a"}, - }).PersistentVolume, - makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{ - v1.LabelHostname: {"node-a"}, - }).PersistentVolume, - }, - wantPreFilterResult: &framework.PreFilterResult{ - NodeNames: sets.New("node-a"), - }, - wantStateAfterPreFilter: &stateData{ - podVolumeClaims: &PodVolumeClaims{ - boundClaims: []*v1.PersistentVolumeClaim{ - makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim, - makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim, - }, - 
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{}, - unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{}, - }, - podVolumesByNode: map[string]*PodVolumes{}, - }, - wantFilterStatus: []*framework.Status{ - nil, - }, - wantPreScoreStatus: framework.NewStatus(framework.Skip), - }, { name: "PVC does not exist", pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod, diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go index 620c496b14e26..c3e850d27720d 100644 --- a/pkg/scheduler/schedule_one_test.go +++ b/pkg/scheduler/schedule_one_test.go @@ -1798,8 +1798,9 @@ func TestSchedulerSchedulePod(t *testing.T) { name string registerPlugins []tf.RegisterPluginFunc extenders []tf.FakeExtender - nodes []string + nodes []*v1.Node pvcs []v1.PersistentVolumeClaim + pvs []v1.PersistentVolume pod *v1.Pod pods []*v1.Pod wantNodes sets.Set[string] @@ -1812,9 +1813,12 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, - pod: st.MakePod().Name("2").UID("2").Obj(), - name: "test 1", + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, + pod: st.MakePod().Name("2").UID("2").Obj(), + name: "test 1", wErr: &framework.FitError{ Pod: st.MakePod().Name("2").UID("2").Obj(), NumAllNodes: 2, @@ -1834,7 +1838,10 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, pod: st.MakePod().Name("ignore").UID("ignore").Obj(), wantNodes: sets.New("node1", "node2"), name: "test 2", @@ -1847,7 +1854,10 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, pod: st.MakePod().Name("node2").UID("node2").Obj(), wantNodes: sets.New("node2"), name: "test 3", @@ -1860,7 +1870,11 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3", "2", "1"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + }, pod: st.MakePod().Name("ignore").UID("ignore").Obj(), wantNodes: sets.New("3"), name: "test 4", @@ -1873,7 +1887,11 @@ func TestSchedulerSchedulePod(t *testing.T) { 
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3", "2", "1"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + }, pod: st.MakePod().Name("2").UID("2").Obj(), wantNodes: sets.New("2"), name: "test 5", @@ -1887,7 +1905,11 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3", "2", "1"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + }, pod: st.MakePod().Name("2").UID("2").Obj(), wantNodes: sets.New("1"), name: "test 6", @@ -1901,9 +1923,13 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3", "2", "1"}, - pod: st.MakePod().Name("2").UID("2").Obj(), - name: "test 7", + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + }, + pod: st.MakePod().Name("2").UID("2").Obj(), + name: "test 7", wErr: &framework.FitError{ Pod: st.MakePod().Name("2").UID("2").Obj(), NumAllNodes: 3, @@ -1928,9 +1954,12 @@ func TestSchedulerSchedulePod(t *testing.T) { pods: []*v1.Pod{ st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(), }, - pod: st.MakePod().Name("2").UID("2").Obj(), - nodes: []string{"1", "2"}, - name: "test 8", + pod: st.MakePod().Name("2").UID("2").Obj(), + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + }, + name: "test 8", wErr: &framework.FitError{ Pod: st.MakePod().Name("2").UID("2").Obj(), NumAllNodes: 2, @@ -1952,13 +1981,19 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, pvcs: []v1.PersistentVolumeClaim{ { ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "existingPV"}, }, }, + pvs: []v1.PersistentVolume{ + {ObjectMeta: metav1.ObjectMeta{Name: "existingPV"}}, + }, pod: 
st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(), wantNodes: sets.New("node1", "node2"), name: "existing PVC", @@ -1972,9 +2007,12 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, - pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(), - name: "unknown PVC", + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, + pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(), + name: "unknown PVC", wErr: &framework.FitError{ Pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(), NumAllNodes: 2, @@ -1996,10 +2034,13 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, - pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}}, - pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(), - name: "deleted PVC", + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, + pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}}, + pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(), + name: "deleted PVC", wErr: &framework.FitError{ Pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(), NumAllNodes: 2, @@ -2021,10 +2062,13 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"2", "1"}, - pod: st.MakePod().Name("2").Obj(), - name: "test error with priority map", - wErr: fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)), + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + }, + pod: st.MakePod().Name("2").Obj(), + name: "test error with priority map", + wErr: fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)), }, { name: "test podtopologyspread plugin - 2 nodes with maxskew=1", @@ -2038,8 +2082,11 @@ func TestSchedulerSchedulePod(t *testing.T) { ), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2"}, - pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{ + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": 
"node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, + pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "kubernetes.io/hostname", v1.DoNotSchedule, &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: "foo", @@ -2066,8 +2113,12 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2", "node3"}, - pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{ + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}}, + }, + pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "kubernetes.io/hostname", v1.DoNotSchedule, &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: "foo", @@ -2094,7 +2145,9 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + }, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), wantNodes: nil, wErr: &framework.FitError{ @@ -2125,7 +2178,11 @@ func TestSchedulerSchedulePod(t *testing.T) { Predicates: []tf.FitPredicate{tf.FalsePredicateExtender}, }, }, - nodes: []string{"1", "2", "3"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + }, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), wantNodes: nil, wErr: &framework.FitError{ @@ -2152,7 +2209,9 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"3"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}}, + }, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), wantNodes: nil, wErr: &framework.FitError{ @@ -2177,7 +2236,10 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"1", "2"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}}, + }, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), wantNodes: nil, wErr: nil, @@ -2192,7 +2254,10 @@ func TestSchedulerSchedulePod(t *testing.T) { ), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"1", "2"}, + 
nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
+ },
 pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 wantNodes: nil,
 wErr: &framework.FitError{
@@ -2218,7 +2283,10 @@
 ),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"1", "2"},
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
+ },
 pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 wantNodes: nil,
 wErr: fmt.Errorf(`running PreFilter plugin "FakePreFilter": %w`, errors.New("injected error status")),
@@ -2241,9 +2309,14 @@
 ),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"node1", "node2", "node3"},
- pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
- wantNodes: sets.New("node2"),
+ wantNodes: sets.New("node2"),
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
+ },
+ pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
+ // since this case has no score plugin, we'll only try to find one node in Filter stage
 wantEvaluatedNodes: ptr.To[int32](1),
 },
 {
@@ -2264,8 +2338,12 @@
 ),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"node1", "node2", "node3"},
- pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
+ },
+ pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 wErr: &framework.FitError{
 Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 NumAllNodes: 3,
@@ -2294,8 +2372,10 @@
 ),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"node1"},
- pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
+ },
+ pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 wErr: &framework.FitError{
 Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 NumAllNodes: 1,
@@ -2322,8 +2402,11 @@
 ),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"node1", "node2"},
- pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
+ nodes: []*v1.Node{
+ {ObjectMeta: 
metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, + pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), wErr: &framework.FitError{ Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), NumAllNodes: 2, @@ -2368,7 +2451,11 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2", "node3"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}}, + }, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), wantNodes: sets.New("node2", "node3"), wantEvaluatedNodes: ptr.To[int32](3), @@ -2384,7 +2471,10 @@ func TestSchedulerSchedulePod(t *testing.T) { framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"), ), "PreScore", "Score"), }, - nodes: []string{"node1", "node2"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + }, pod: st.MakePod().Name("ignore").UID("ignore").Obj(), wantNodes: sets.New("node1", "node2"), }, @@ -2395,7 +2485,11 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2", "node3"}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}}, + }, pod: st.MakePod().Name("pod1").UID("pod1").Obj(), wantNodes: sets.New("node1", "node2", "node3"), wantEvaluatedNodes: ptr.To[int32](1), @@ -2410,9 +2504,14 @@ func TestSchedulerSchedulePod(t *testing.T) { ), tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - nodes: []string{"node1", "node2", "node3"}, - pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), - wantNodes: sets.New("node1", "node2"), + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}}, + }, + pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), + wantNodes: sets.New("node1", "node2"), + // since this case has no score plugin, we'll only try to find one node in Filter stage wantEvaluatedNodes: ptr.To[int32](1), }, { @@ -2428,7 +2527,9 @@ func TestSchedulerSchedulePod(t *testing.T) { tf.RegisterFilterPlugin("TrueFilter", 
tf.NewTrueFilterPlugin),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 },
- nodes: []string{"1", "2"},
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
+ },
 pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 wantNodes: nil,
 wErr: &framework.FitError{
@@ -2439,6 +2540,50 @@
 },
 },
 },
+ {
+ registerPlugins: []tf.RegisterPluginFunc{
+ tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
+ tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
+ tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
+ tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
+ tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
+ },
+ nodes: []*v1.Node{
+ {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "host1"}}},
+ },
+ pvcs: []v1.PersistentVolumeClaim{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PVC1", UID: types.UID("PVC1"), Namespace: v1.NamespaceDefault},
+ Spec: v1.PersistentVolumeClaimSpec{VolumeName: "PV1"},
+ },
+ },
+ pvs: []v1.PersistentVolume{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PV1", UID: types.UID("PV1")},
+ Spec: v1.PersistentVolumeSpec{
+ NodeAffinity: &v1.VolumeNodeAffinity{
+ Required: &v1.NodeSelector{
+ NodeSelectorTerms: []v1.NodeSelectorTerm{
+ {
+ MatchExpressions: []v1.NodeSelectorRequirement{
+ {
+ Key: "kubernetes.io/hostname",
+ Operator: v1.NodeSelectorOpIn,
+ Values: []string{"host1"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ pod: st.MakePod().Name("pod1").UID("pod1").Namespace(v1.NamespaceDefault).PVC("PVC1").Obj(),
+ wantNodes: sets.New("node1"),
+ name: "hostname and nodename of the node do not match",
+ wErr: nil,
+ },
 }
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -2451,8 +2596,7 @@
 cache.AddPod(logger, pod)
 }
 var nodes []*v1.Node
- for _, name := range test.nodes {
- node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}}
+ for _, node := range test.nodes {
 nodes = append(nodes, node)
 cache.AddNode(logger, node)
 }
@@ -2462,10 +2606,9 @@
 for _, pvc := range test.pvcs {
 metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
 cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, &pvc, metav1.CreateOptions{})
- if pvName := pvc.Spec.VolumeName; pvName != "" {
- pv := v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: pvName}}
- cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
- }
+ }
+ for _, pv := range test.pvs {
+ _, _ = cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
 }
 snapshot := internalcache.NewSnapshot(test.pods, nodes)
 fwk, err := tf.NewFramework(
diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go
index 601dc64601348..d1691cd806ef9 100644
--- a/pkg/volume/util/util.go
+++ b/pkg/volume/util/util.go
@@ -575,44 +575,6 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
 volume.ConfigMap != nil
 }
-// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
-// local PersistentVolumes. 
nil is returned if the PV does not have any
-// specific node affinity node selector terms and match expressions.
-// PersistentVolume with node affinity has select and match expressions
-// in the form of:
-//
-// nodeAffinity:
-// required:
-// nodeSelectorTerms:
-// - matchExpressions:
-// - key: kubernetes.io/hostname
-// operator: In
-// values:
-// - <node1>
-// - <node2>
-func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
- if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
- return nil
- }
-
- var result sets.Set[string]
- for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
- var nodes sets.Set[string]
- for _, matchExpr := range term.MatchExpressions {
- if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
- if nodes == nil {
- nodes = sets.New(matchExpr.Values...)
- } else {
- nodes = nodes.Intersection(sets.New(matchExpr.Values...))
- }
- }
- }
- result = result.Union(nodes)
- }
-
- return sets.List(result)
-}
-
 // GetPodVolumeNames returns names of volumes that are used in a pod,
 // either as filesystem mount or raw block device, together with list
 // of all SELinux contexts of all containers that use the volumes.
diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go
index e35cce690e1eb..f3711b0a84f7d 100644
--- a/pkg/volume/util/util_test.go
+++ b/pkg/volume/util/util_test.go
@@ -23,7 +23,6 @@ import (
 "strings"
 "testing"

- "github.com/google/go-cmp/cmp"
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -941,304 +940,3 @@ func TestGetPodVolumeNames(t *testing.T) {
 })
 }
 }
-
-func TestGetPersistentVolumeNodeNames(t *testing.T) {
- tests := []struct {
- name string
- pv *v1.PersistentVolume
- expectedNodeNames []string
- }{
- {
- name: "nil PV",
- pv: nil,
- },
- {
- name: "PV missing node affinity",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- },
- },
- {
- name: "PV node affinity missing required",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- Spec: v1.PersistentVolumeSpec{
- NodeAffinity: &v1.VolumeNodeAffinity{},
- },
- },
- },
- {
- name: "PV node affinity required zero selector terms",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- Spec: v1.PersistentVolumeSpec{
- NodeAffinity: &v1.VolumeNodeAffinity{
- Required: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{},
- },
- },
- },
- },
- expectedNodeNames: []string{},
- },
- {
- name: "PV node affinity required zero selector terms",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- Spec: v1.PersistentVolumeSpec{
- NodeAffinity: &v1.VolumeNodeAffinity{
- Required: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{},
- },
- },
- },
- },
- expectedNodeNames: []string{},
- },
- {
- name: "PV node affinity required zero match expressions",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- Spec: v1.PersistentVolumeSpec{
- NodeAffinity: &v1.VolumeNodeAffinity{
- Required: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{
- {
- MatchExpressions: []v1.NodeSelectorRequirement{},
- },
- },
- },
- },
- },
- },
- expectedNodeNames: []string{},
- },
- {
- name: "PV node affinity required multiple match expressions",
- pv: &v1.PersistentVolume{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- },
- Spec: v1.PersistentVolumeSpec{
- NodeAffinity: 
&v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpIn, - }, - { - Key: "bar", - Operator: v1.NodeSelectorOpIn, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required single match expression with no values", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{}, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required single match expression with single node", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node1", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{ - "node1", - }, - }, - { - name: "PV node affinity required single match expression with multiple nodes", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node1", - "node2", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{ - "node1", - "node2", - }, - }, - { - name: "PV node affinity required multiple match expressions with multiple nodes", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "bar", - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node1", - "node2", - }, - }, - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node3", - "node4", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{ - "node3", - "node4", - }, - }, - { - name: "PV node affinity required multiple node selectors multiple match expressions with multiple nodes", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node1", - "node2", - }, - }, - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node2", - "node3", - }, - }, - }, - }, - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{ - "node1", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{ - "node1", - 
"node2", - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - nodeNames := GetLocalPersistentVolumeNodeNames(test.pv) - if diff := cmp.Diff(test.expectedNodeNames, nodeNames); diff != "" { - t.Errorf("Unexpected nodeNames (-want, +got):\n%s", diff) - } - }) - } -}