diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go
index af7122ed3b0..1819d0b587a 100644
--- a/test/e2e/framework/pod/create.go
+++ b/test/e2e/framework/pod/create.go
@@ -86,10 +86,7 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
 // CreateSecPodWithNodeSelection creates security pod with given claims
 func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	// Setting node
-	pod.Spec.NodeName = node.Name
-	pod.Spec.NodeSelector = node.Selector
-	pod.Spec.Affinity = node.Affinity
+	SetNodeSelection(pod, node)
 
 	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 	if err != nil {
diff --git a/test/e2e/framework/pod/node_selection.go b/test/e2e/framework/pod/node_selection.go
index 01d1d4c165c..50d530ee705 100644
--- a/test/e2e/framework/pod/node_selection.go
+++ b/test/e2e/framework/pod/node_selection.go
@@ -87,3 +87,19 @@ func SetNodeAffinity(pod *v1.Pod, nodeName string) {
 	SetAffinity(nodeSelection, nodeName)
 	pod.Spec.Affinity = nodeSelection.Affinity
 }
+
+// SetNodeSelection modifies the given pod object with
+// the specified NodeSelection
+func SetNodeSelection(pod *v1.Pod, nodeSelection NodeSelection) {
+	pod.Spec.NodeSelector = nodeSelection.Selector
+	pod.Spec.Affinity = nodeSelection.Affinity
+	// pod.Spec.NodeName should not be set directly because
+	// it would bypass the scheduler, potentially causing
+	// kubelet to fail the pod immediately if the node is
+	// out of resources. Instead, we want the pod to remain
+	// pending in the scheduler until the node has resources
+	// freed up.
+	if nodeSelection.Name != "" {
+		SetNodeAffinity(pod, nodeSelection.Name)
+	}
+}
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index 30d0000f75d..03d1635bb7d 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -128,12 +128,8 @@ type TestConfig struct {
 	// Wait for the pod to terminate successfully
 	// False indicates that the pod is long running
 	WaitForCompletion bool
-	// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
-	ServerNodeName string
-	// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
-	ClientNodeName string
-	// NodeSelector to use in pod spec (server, client and injector pods).
-	NodeSelector map[string]string
+	// ClientNodeSelection restricts where the client pod runs. Default is any node.
+	ClientNodeSelection e2epod.NodeSelection
 }
 
 // Test contains a volume to mount into a client pod and its
@@ -297,8 +293,6 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
 			},
 			Volumes:       volumes,
 			RestartPolicy: restartPolicy,
-			NodeName:      config.ServerNodeName,
-			NodeSelector:  config.NodeSelector,
 		},
 	}
 
@@ -389,10 +383,9 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 			TerminationGracePeriodSeconds: &gracePeriod,
 			SecurityContext:               GeneratePodSecurityContext(fsGroup, seLinuxOptions),
 			Volumes:                       []v1.Volume{},
-			NodeName:                      config.ClientNodeName,
-			NodeSelector:                  config.NodeSelector,
 		},
 	}
+	e2epod.SetNodeSelection(clientPod, config.ClientNodeSelection)
 
 	for i, test := range tests {
 		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index fa4f98ac276..2ff9a9c5f77 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		m.provisioner = config.GetUniqueDriverName()
 
 		if tp.nodeSelectorKey != "" {
-			framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeName, tp.nodeSelectorKey, f.Namespace.Name)
+			framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeSelection.Name, tp.nodeSelectorKey, f.Namespace.Name)
 			m.nodeLabel = map[string]string{
 				tp.nodeSelectorKey: f.Namespace.Name,
 			}
@@ -138,7 +138,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
 			sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
 		}
-		nodeName := m.config.ClientNodeName
+		nodeName := m.config.ClientNodeSelection.Name
 		scTest := testsuites.StorageClassTest{
 			Name:         m.driver.GetDriverInfo().Name,
 			Provisioner:  sc.Provisioner,
@@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 	}
 
 	createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
-		nodeName := m.config.ClientNodeName
+		nodeName := m.config.ClientNodeSelection.Name
 		nodeSelection := e2epod.NodeSelection{
 			Name: nodeName,
 		}
@@ -230,7 +230,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		}
 
 		if len(m.nodeLabel) > 0 && len(m.tp.nodeSelectorKey) > 0 {
-			framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeName, m.tp.nodeSelectorKey)
+			framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeSelection.Name, m.tp.nodeSelectorKey)
 		}
 
 		err := utilerrors.NewAggregate(errs)
@@ -274,7 +274,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			ginkgo.By("Checking if VolumeAttachment was created for the pod")
 			handle := getVolumeHandle(m.cs, claim)
-			attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName)))
+			attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
 			attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
 			_, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
 			if err != nil {
@@ -390,7 +390,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			nodeSelectorKey := fmt.Sprintf("attach-limit-csi-%s", f.Namespace.Name)
 			init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
 			defer cleanup()
-			nodeName := m.config.ClientNodeName
+			nodeName := m.config.ClientNodeSelection.Name
 			driverName := m.config.GetUniqueDriverName()
 
 			csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index a772c5e5f57..408c2fed89f 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -53,6 +53,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -175,10 +176,10 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
-		Driver:         h,
-		Prefix:         "hostpath",
-		Framework:      f,
-		ClientNodeName: node.Name,
+		Driver:              h,
+		Prefix:              "hostpath",
+		Framework:           f,
+		ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 	}
 
 	o := utils.PatchCSIOptions{
@@ -299,10 +300,10 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
-		Driver:         m,
-		Prefix:         "mock",
-		Framework:      f,
-		ClientNodeName: node.Name,
+		Driver:              m,
+		Prefix:              "mock",
+		Framework:           f,
+		ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 	}
 
 	containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
@@ -324,7 +325,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 		DriverContainerName:      "mock",
 		DriverContainerArguments: containerArgs,
 		ProvisionerContainerName: "csi-provisioner",
-		NodeName:                 config.ClientNodeName,
+		NodeName:                 node.Name,
 		PodInfo:                  m.podInfo,
 		CanAttach:                &m.attachable,
 		VolumeLifecycleModes: &[]storagev1beta1.VolumeLifecycleMode{
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index fa5107ea69a..99a8e3c0080 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -442,7 +442,7 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 
 	c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name)
 	config.ServerConfig = &c
-	config.ClientNodeName = c.ClientNodeName
+	config.ClientNodeSelection = c.ClientNodeSelection
 	return &iSCSIVolume{
 		serverPod: serverPod,
 		serverIP:  serverIP,
@@ -473,7 +473,7 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config volume.Tes
 	}
 	pod, ip = volume.CreateStorageServer(cs, config)
 	// Make sure the client runs on the same node as server so we don't need to open any firewalls.
-	config.ClientNodeSelection = pod.Spec.NodeName
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
 	return config, pod, ip, iqn
 }
 
@@ -820,7 +820,7 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
 	// pods should be scheduled on the node
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
-	config.ClientNodeName = node.Name
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 	return nil
 }
 
@@ -902,7 +902,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
 	// pods should be scheduled on the node
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
-	config.ClientNodeName = node.Name
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 
 	cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
 	privileged := true
@@ -1317,8 +1317,10 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 		Framework: f,
 	}
 	if framework.NodeOSDistroIs("windows") {
-		config.ClientNodeSelector = map[string]string{
-			"beta.kubernetes.io/os": "windows",
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				"beta.kubernetes.io/os": "windows",
+			},
 		}
 	}
 	return config, func() {}
@@ -1329,8 +1331,10 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		config.ClientNodeSelector = map[string]string{
-			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+			},
 		}
 	}
 	ginkgo.By("creating a test gce pd volume")
@@ -1710,8 +1714,10 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
 		Framework: f,
 	}
 	if framework.NodeOSDistroIs("windows") {
-		config.ClientNodeSelector = map[string]string{
-			"beta.kubernetes.io/os": "windows",
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				"beta.kubernetes.io/os": "windows",
+			},
 		}
 	}
 	return config, func() {}
@@ -1721,8 +1727,10 @@ func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		config.ClientNodeSelector = map[string]string{
-			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+			},
 		}
 	}
 	ginkgo.By("creating a test aws volume")
@@ -1858,10 +1866,10 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 	}
 
 	return &testsuites.PerTestConfig{
-		Driver:         l,
-		Prefix:         "local",
-		Framework:      f,
-		ClientNodeName: l.node.Name,
+		Driver:              l,
+		Prefix:              "local",
+		Framework:           f,
+		ClientNodeSelection: e2epod.NodeSelection{Name: l.node.Name},
 	}, func() {
 		l.hostExec.Cleanup()
 	}
@@ -1872,7 +1880,7 @@ func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	case testpatterns.PreprovisionedPV:
 		node := l.node
 		// assign this to schedule pod on this node
-		config.ClientNodeName = node.Name
+		config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 		return &localVolume{
 			ltrMgr: l.ltrMgr,
 			ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
diff --git a/test/e2e/storage/external/BUILD b/test/e2e/storage/external/BUILD
index b87cd0d4ff1..a469aff19e1 100644
--- a/test/e2e/storage/external/BUILD
+++ b/test/e2e/storage/external/BUILD
@@ -15,6 +15,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/config:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/skipper:go_default_library",
        "//test/e2e/framework/volume:go_default_library",
        "//test/e2e/storage/testpatterns:go_default_library",
diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go
index 5bdf8fd7a6a..99f977f94dc 100644
--- a/test/e2e/storage/external/external.go
+++ b/test/e2e/storage/external/external.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -330,10 +331,10 @@ func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) st
 
 func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	config := &testsuites.PerTestConfig{
-		Driver:         d,
-		Prefix:         "external",
-		Framework:      f,
-		ClientNodeName: d.ClientNodeName,
+		Driver:              d,
+		Prefix:              "external",
+		Framework:           f,
+		ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
 	}
 	return config, func() {}
 }
diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go
index bf4cc74950f..1d512e4ea94 100644
--- a/test/e2e/storage/flexvolume.go
+++ b/test/e2e/storage/flexvolume.go
@@ -29,6 +29,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -177,9 +178,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 		framework.ExpectNoError(err)
 		config = volume.TestConfig{
-			Namespace:      ns.Name,
-			Prefix:         "flex",
-			ClientNodeName: node.Name,
+			Namespace:           ns.Name,
+			Prefix:              "flex",
+			ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 		}
 		suffix = ns.Name
 	})
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index f2bc0fd8a93..69ba10db7c2 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -988,7 +988,8 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, 
 	if pod == nil {
 		return
 	}
-	pod.Spec.NodeName = nodeName
+
+	e2epod.SetNodeAffinity(pod, nodeName)
 	return
 }
 
diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go
index 479a7c323d7..0ace7c28119 100644
--- a/test/e2e/storage/testsuites/base.go
+++ b/test/e2e/storage/testsuites/base.go
@@ -429,10 +429,9 @@ func convertTestConfig(in *PerTestConfig) volume.TestConfig {
 	}
 
 	return volume.TestConfig{
-		Namespace:      in.Framework.Namespace.Name,
-		Prefix:         in.Prefix,
-		ClientNodeName: in.ClientNodeName,
-		NodeSelector:   in.ClientNodeSelector,
+		Namespace:           in.Framework.Namespace.Name,
+		Prefix:              in.Prefix,
+		ClientNodeSelection: in.ClientNodeSelection,
 	}
 }
 
diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go
index 2719901e362..7e564031bb7 100644
--- a/test/e2e/storage/testsuites/disruptive.go
+++ b/test/e2e/storage/testsuites/disruptive.go
@@ -160,7 +160,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			pvcs = append(pvcs, l.resource.Pvc)
 		}
 		ginkgo.By("Creating a pod with pvc")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 		framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 
 		if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go
index c52f02a3b1d..10e039d6713 100644
--- a/test/e2e/storage/testsuites/ephemeral.go
+++ b/test/e2e/storage/testsuites/ephemeral.go
@@ -101,7 +101,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
 			Client:     l.config.Framework.ClientSet,
 			Namespace:  f.Namespace.Name,
 			DriverName: eDriver.GetCSIDriverName(l.config),
-			Node:       e2epod.NodeSelection{Name: l.config.ClientNodeName},
+			Node:       l.config.ClientNodeSelection,
 			GetVolume: func(volumeNumber int) (map[string]string, bool, bool) {
 				return eDriver.GetVolume(l.config, volumeNumber)
 			},
@@ -291,9 +291,6 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 			},
 		},
 		Spec: v1.PodSpec{
-			NodeName:     node.Name,
-			NodeSelector: node.Selector,
-			Affinity:     node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "csi-volume-tester",
@@ -304,6 +301,7 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 			RestartPolicy: v1.RestartPolicyNever,
 		},
 	}
+	e2epod.SetNodeSelection(pod, node)
 
 	for i, csiVolume := range csiVolumes {
 		name := fmt.Sprintf("my-volume-%d", i)
diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go
index 68a783a97c3..36dfb1ef265 100644
--- a/test/e2e/storage/testsuites/multivolume.go
+++ b/test/e2e/storage/testsuites/multivolume.go
@@ -150,7 +150,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -178,14 +178,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < 2 {
 			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -201,7 +200,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, pvcs, false /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration (only pattern is tested):
@@ -240,7 +239,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
 	})
 
 	// This tests below configuration (only pattern is tested):
@@ -272,14 +271,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < 2 {
 			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -300,7 +298,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, pvcs, false /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -325,7 +323,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -349,14 +347,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < numPods {
 			e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -368,7 +365,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
 	})
 }
 
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 72efe79d296..91096cb9b6a 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -181,7 +181,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
-			PVWriteReadSingleNodeCheck(l.cs, claim, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			PVWriteReadSingleNodeCheck(l.cs, claim, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -201,14 +201,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		dc := l.config.Framework.DynamicClient
 		vsc := sDriver.GetSnapshotClass(l.config)
-		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
+		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(l.config.ClientNodeSelection, l.cs, dc, l.pvc, l.sc, vsc)
 		defer cleanupFunc()
 
 		l.pvc.Spec.DataSource = dataSource
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			ginkgo.By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -221,14 +221,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		defer cleanup()
 
 		dc := l.config.Framework.DynamicClient
-		dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.sourcePVC, l.sc)
+		dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(l.config.ClientNodeSelection, l.cs, dc, l.sourcePVC, l.sc)
 		defer dataSourceCleanup()
 
 		l.pvc.Spec.DataSource = dataSource
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			ginkgo.By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -567,9 +567,6 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 			},
 		},
 		Spec: v1.PodSpec{
-			NodeName:     node.Name,
-			NodeSelector: node.Selector,
-			Affinity:     node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "volume-tester",
@@ -598,6 +595,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 		},
 	}
 
+	e2epod.SetNodeSelection(pod, node)
 	pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	return pod
diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go
index 847806829ff..56793995281 100644
--- a/test/e2e/storage/testsuites/snapshottable.go
+++ b/test/e2e/storage/testsuites/snapshottable.go
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -152,7 +151,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 
 		ginkgo.By("starting a pod to use the claim")
 		command := "echo 'hello world' > /mnt/test/data"
-		pod := StartInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, e2epod.NodeSelection{Name: config.ClientNodeName})
+		pod := StartInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
 		defer StopPod(cs, pod)
 
 		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go
index 7606ac4376e..791b4403983 100644
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -151,12 +151,10 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
 
 		subPath := f.Namespace.Name
 		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true)
-		l.pod.Spec.NodeName = l.config.ClientNodeName
-		l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
+		e2epod.SetNodeSelection(l.pod, l.config.ClientNodeSelection)
 
 		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
-		l.formatPod.Spec.NodeName = l.config.ClientNodeName
-		l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
+		e2epod.SetNodeSelection(l.formatPod, l.config.ClientNodeSelection)
 
 		l.subPathDir = filepath.Join(volumePath, subPath)
 		l.filePathInSubpath = filepath.Join(volumePath, fileName)
diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go
index 1cdb0a84aae..4db7d27a29a 100644
--- a/test/e2e/storage/testsuites/testdriver.go
+++ b/test/e2e/storage/testsuites/testdriver.go
@@ -22,6 +22,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -205,16 +206,10 @@ type PerTestConfig struct {
 	// The framework instance allocated for the current test.
 	Framework *framework.Framework
 
-	// If non-empty, then pods using a volume will be scheduled
-	// onto the node with this name. Otherwise Kubernetes will
+	// If non-empty, pods using a volume will be scheduled
+	// according to the NodeSelection. Otherwise Kubernetes will
 	// pick a node.
-	ClientNodeName string
-
-	// Some tests also support scheduling pods onto nodes with
-	// these label/value pairs. As not all tests use this field,
-	// a driver that absolutely needs the pods on a specific
-	// node must use ClientNodeName.
-	ClientNodeSelector map[string]string
+	ClientNodeSelection e2epod.NodeSelection
 
 	// Some test drivers initialize a storage server. This is
 	// the configuration that then has to be used to run tests.
diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go
index f3e725ba030..7ce2511043e 100644
--- a/test/e2e/storage/testsuites/volume_expand.go
+++ b/test/e2e/storage/testsuites/volume_expand.go
@@ -166,7 +166,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 			var err error
 
 			ginkgo.By("Creating a pod with dynamically provisioned volume")
-			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 				framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -203,7 +203,7 @@
 			l.resource.Pvc = npvc
 
 			ginkgo.By("Creating a new pod with same volume")
-			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
 				framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -224,7 +224,7 @@
 			var err error
 
 			ginkgo.By("Creating a pod with dynamically provisioned volume")
-			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 				framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go
index 07412bff3de..699d1d2c7c6 100644
--- a/test/e2e/storage/testsuites/volume_io.go
+++ b/test/e2e/storage/testsuites/volume_io.go
@@ -183,7 +183,7 @@ func createFileSizes(maxFileSize int64) []int64 {
 func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
 	var gracePeriod int64 = 1
 	volName := fmt.Sprintf("io-volume-%s", config.Namespace)
-	return &v1.Pod{
+	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Pod",
 			APIVersion: "v1",
@@ -238,10 +238,11 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
 				},
 			},
 			RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
-			NodeName:      config.ClientNodeName,
-			NodeSelector:  config.NodeSelector,
 		},
 	}
+
+	e2epod.SetNodeSelection(pod, config.ClientNodeSelection)
+	return pod
 }
 
 // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go
index f8f9f39b2ad..dfd754eeeb8 100644
--- a/test/e2e/storage/testsuites/volumelimits.go
+++ b/test/e2e/storage/testsuites/volumelimits.go
@@ -128,7 +128,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		ginkgo.By("Picking a node")
 		// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
 		// so we use that node instead of picking a random one.
-		nodeName := l.config.ClientNodeName
+		nodeName := l.config.ClientNodeSelection.Name
 		if nodeName == "" {
 			node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 			framework.ExpectNoError(err)
diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go
index 2c6a4633f5e..5d19553a122 100644
--- a/test/e2e/storage/testsuites/volumemode.go
+++ b/test/e2e/storage/testsuites/volumemode.go
@@ -215,7 +215,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			ginkgo.By("Creating pod")
 			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
 			// Setting node
-			pod.Spec.NodeName = l.config.ClientNodeName
+			e2epod.SetNodeSelection(pod, l.config.ClientNodeSelection)
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "Failed to create pod")
 			defer func() {
diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go
index 9a20f53df38..c2bdd520874 100644
--- a/test/e2e/storage/testsuites/volumes.go
+++ b/test/e2e/storage/testsuites/volumes.go
@@ -245,10 +245,9 @@ func testScriptInPod(
 			},
 		},
 		RestartPolicy: v1.RestartPolicyNever,
-		NodeSelector:  config.ClientNodeSelector,
-		NodeName:      config.ClientNodeName,
 	},
 }
+	e2epod.SetNodeSelection(pod, config.ClientNodeSelection)
 
 	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 	f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})
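
Usage sketch (not part of the patch; names below are illustrative and assume only the NodeSelection type and the SetNodeSelection helper added in node_selection.go above):

package example

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// pinPodToNode is a hypothetical caller showing the intended pattern:
// instead of assigning pod.Spec.NodeName (which bypasses the scheduler),
// a non-empty NodeSelection.Name is turned into node affinity, so the
// pod still goes through scheduling and can stay Pending until the node
// has resources available.
func pinPodToNode(pod *v1.Pod, nodeName string) {
	e2epod.SetNodeSelection(pod, e2epod.NodeSelection{Name: nodeName})
}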