e2e: avoid setting NodeName for CSI driver deployments

We don't want to set the name directly because then starting the pod
can fail when the node is temporarily out of resources
(https://github.com/kubernetes/kubernetes/issues/87855).

For CSI driver deployments, we have three options:
- modify the pod spec with custom code, similar
  to how the NodeSelection utility code does it
- add variants of SetNodeSelection and SetNodeAffinity which
  work with a pod spec instead of a pod
- change their parameter from pod to pod spec and then use
  them also when patching a pod spec

The last approach is used here because it seems more general. There
might be other cases in the future where there's only a pod spec that
needs to be modified.
This commit is contained in:
Patrick Ohly 2020-02-19 10:46:08 +01:00
parent 86141c0cce
commit d71829a1fc
13 changed files with 20 additions and 19 deletions

View File

@@ -86,7 +86,7 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
// CreateSecPodWithNodeSelection creates security pod with given claims // CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) { func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup) pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
SetNodeSelection(pod, node) SetNodeSelection(&pod.Spec, node)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil { if err != nil {

View File

@@ -82,17 +82,17 @@ func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
// SetNodeAffinity modifies the given pod object with // SetNodeAffinity modifies the given pod object with
// NodeAffinity to the given node name. // NodeAffinity to the given node name.
func SetNodeAffinity(pod *v1.Pod, nodeName string) { func SetNodeAffinity(podSpec *v1.PodSpec, nodeName string) {
nodeSelection := &NodeSelection{} nodeSelection := &NodeSelection{}
SetAffinity(nodeSelection, nodeName) SetAffinity(nodeSelection, nodeName)
pod.Spec.Affinity = nodeSelection.Affinity podSpec.Affinity = nodeSelection.Affinity
} }
// SetNodeSelection modifies the given pod object with // SetNodeSelection modifies the given pod object with
// the specified NodeSelection // the specified NodeSelection
func SetNodeSelection(pod *v1.Pod, nodeSelection NodeSelection) { func SetNodeSelection(podSpec *v1.PodSpec, nodeSelection NodeSelection) {
pod.Spec.NodeSelector = nodeSelection.Selector podSpec.NodeSelector = nodeSelection.Selector
pod.Spec.Affinity = nodeSelection.Affinity podSpec.Affinity = nodeSelection.Affinity
// pod.Spec.NodeName should not be set directly because // pod.Spec.NodeName should not be set directly because
// it will bypass the scheduler, potentially causing // it will bypass the scheduler, potentially causing
// kubelet to Fail the pod immediately if it's out of // kubelet to Fail the pod immediately if it's out of
@@ -100,6 +100,6 @@ func SetNodeSelection(pod *v1.Pod, nodeSelection NodeSelection) {
// pending in the scheduler until the node has resources // pending in the scheduler until the node has resources
// freed up. // freed up.
if nodeSelection.Name != "" { if nodeSelection.Name != "" {
SetNodeAffinity(pod, nodeSelection.Name) SetNodeAffinity(podSpec, nodeSelection.Name)
} }
} }

View File

@@ -381,7 +381,7 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
Volumes: []v1.Volume{}, Volumes: []v1.Volume{},
}, },
} }
e2epod.SetNodeSelection(clientPod, config.ClientNodeSelection) e2epod.SetNodeSelection(&clientPod.Spec, config.ClientNodeSelection)
for i, test := range tests { for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i) volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)

View File

@@ -689,7 +689,7 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum
}, },
}, },
} }
e2epod.SetNodeSelection(pod, node) e2epod.SetNodeSelection(&pod.Spec, node)
return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
} }

View File

@@ -989,7 +989,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
return return
} }
e2epod.SetNodeAffinity(pod, nodeName) e2epod.SetNodeAffinity(&pod.Spec, nodeName)
return return
} }

View File

@@ -301,7 +301,7 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
e2epod.SetNodeSelection(pod, node) e2epod.SetNodeSelection(&pod.Spec, node)
for i, csiVolume := range csiVolumes { for i, csiVolume := range csiVolumes {
name := fmt.Sprintf("my-volume-%d", i) name := fmt.Sprintf("my-volume-%d", i)

View File

@@ -595,7 +595,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
}, },
} }
e2epod.SetNodeSelection(pod, node) e2epod.SetNodeSelection(&pod.Spec, node)
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(err, "Failed to create pod: %v", err)
return pod return pod

View File

@@ -151,10 +151,10 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
subPath := f.Namespace.Name subPath := f.Namespace.Name
l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true) l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true)
e2epod.SetNodeSelection(l.pod, l.config.ClientNodeSelection) e2epod.SetNodeSelection(&l.pod.Spec, l.config.ClientNodeSelection)
l.formatPod = volumeFormatPod(f, l.resource.VolSource) l.formatPod = volumeFormatPod(f, l.resource.VolSource)
e2epod.SetNodeSelection(l.formatPod, l.config.ClientNodeSelection) e2epod.SetNodeSelection(&l.formatPod.Spec, l.config.ClientNodeSelection)
l.subPathDir = filepath.Join(volumePath, subPath) l.subPathDir = filepath.Join(volumePath, subPath)
l.filePathInSubpath = filepath.Join(volumePath, fileName) l.filePathInSubpath = filepath.Join(volumePath, fileName)

View File

@@ -241,7 +241,7 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
}, },
} }
e2epod.SetNodeSelection(pod, config.ClientNodeSelection) e2epod.SetNodeSelection(&pod.Spec, config.ClientNodeSelection)
return pod return pod
} }

View File

@@ -215,7 +215,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil) pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Setting node // Setting node
e2epod.SetNodeSelection(pod, l.config.ClientNodeSelection) e2epod.SetNodeSelection(&pod.Spec, l.config.ClientNodeSelection)
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod") framework.ExpectNoError(err, "Failed to create pod")
defer func() { defer func() {

View File

@@ -247,7 +247,7 @@ func testScriptInPod(
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
e2epod.SetNodeSelection(pod, config.ClientNodeSelection) e2epod.SetNodeSelection(&pod.Spec, config.ClientNodeSelection)
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName}) f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})

View File

@@ -25,6 +25,7 @@ import (
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1" storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
) )
// PatchCSIDeployment modifies the CSI driver deployment: // PatchCSIDeployment modifies the CSI driver deployment:
@@ -98,7 +99,7 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
patchContainers(spec.Containers) patchContainers(spec.Containers)
patchVolumes(spec.Volumes) patchVolumes(spec.Volumes)
if o.NodeName != "" { if o.NodeName != "" {
spec.NodeName = o.NodeName e2epod.SetNodeSelection(spec, e2epod.NodeSelection{Name: o.NodeName})
} }
} }

View File

@@ -80,7 +80,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
// be immediately Failed by kubelet if it's out of space. Instead // be immediately Failed by kubelet if it's out of space. Instead
// Pods will be pending in the scheduler until there is space freed // Pods will be pending in the scheduler until there is space freed
// up. // up.
e2epod.SetNodeAffinity(hostExecPod, node) e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
hostExecPod.Spec.Volumes = []v1.Volume{ hostExecPod.Spec.Volumes = []v1.Volume{
{ {
// Required to enter into host mount namespace via nsenter. // Required to enter into host mount namespace via nsenter.