e2e don't set pod.Spec.NodeName directly
based on this comment in
ea07644522/test/e2e/framework/pod/node_selection.go (L96-L101)
// pod.Spec.NodeName should not be set directly because
// it will bypass the scheduler, potentially causing
// kubelet to Fail the pod immediately if it's out of
// resources. Instead, we want the pod to remain
// pending in the scheduler until the node has resources
// freed up.
This commit is contained in:
parent 97bdd7b9eb
commit 60c3480740
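The pattern applied throughout the diff below can be summarized with a short sketch: pin an e2e pod to a node through e2epod.SetNodeSelection (node affinity) instead of writing pod.Spec.NodeName. This is illustrative only; pinPodToNode is a hypothetical helper, and ns, podName, and nodeName are placeholder values.

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// pinPodToNode is a hypothetical helper showing the recommended pattern:
// express placement via SetNodeSelection so the pod stays Pending in the
// scheduler until the node has resources, rather than bypassing scheduling.
func pinPodToNode(ns, podName, nodeName string) *v1.Pod {
	pod := e2epod.NewAgnhostPod(ns, podName, nil, nil, nil)
	// Avoid: pod.Spec.NodeName = nodeName
	// (bypasses the scheduler; kubelet may fail the pod immediately
	// if the node is out of resources)
	e2epod.SetNodeSelection(&pod.Spec, e2epod.NodeSelection{Name: nodeName})
	return pod
}
```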
@@ -143,7 +143,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 // Create a pod in one node to create the UDP traffic against the NodePort service every 5 seconds
 ginkgo.By("creating a client pod for probing the service " + serviceName)
 clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
-clientPod.Spec.NodeName = clientNodeInfo.name
+nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
+e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
 cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
 clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
 clientPod.Spec.Containers[0].Name = podClient
@@ -158,7 +159,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
 serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 serverPod1.Labels = udpJig.Labels
-serverPod1.Spec.NodeName = serverNodeInfo.name
+nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
+e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
 fr.PodClient().CreateSync(serverPod1)

 validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
@@ -180,7 +182,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
 serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 serverPod2.Labels = udpJig.Labels
-serverPod2.Spec.NodeName = serverNodeInfo.name
+nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
+e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
 fr.PodClient().CreateSync(serverPod2)

 // and delete the first pod
@@ -216,7 +219,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 // Create a pod in one node to create the UDP traffic against the ClusterIP service every 5 seconds
 ginkgo.By("creating a client pod for probing the service " + serviceName)
 clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
-clientPod.Spec.NodeName = clientNodeInfo.name
+nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
+e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
 cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
 clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
 clientPod.Spec.Containers[0].Name = podClient
@@ -231,7 +235,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
 serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 serverPod1.Labels = udpJig.Labels
-serverPod1.Spec.NodeName = serverNodeInfo.name
+nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
+e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
 fr.PodClient().CreateSync(serverPod1)

 validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
@@ -253,7 +258,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
 serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
 serverPod2.Labels = udpJig.Labels
-serverPod2.Spec.NodeName = serverNodeInfo.name
+nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
+e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
 fr.PodClient().CreateSync(serverPod2)

 // and delete the first pod
@@ -290,7 +296,6 @@ var _ = common.SIGDescribe("Conntrack", func() {
 Labels: serverLabel,
 },
 Spec: v1.PodSpec{
-NodeName: serverNodeInfo.name,
 Containers: []v1.Container{
 {
 Name: "boom-server",
@@ -324,6 +329,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 },
 },
 }
+nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name}
+e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection)
 fr.PodClient().CreateSync(serverPod)
 ginkgo.By("Server pod created on node " + serverNodeInfo.name)

@@ -351,7 +358,6 @@ var _ = common.SIGDescribe("Conntrack", func() {
 Name: "startup-script",
 },
 Spec: v1.PodSpec{
-NodeName: clientNodeInfo.name,
 Containers: []v1.Container{
 {
 Name: "startup-script",
@@ -364,6 +370,8 @@ var _ = common.SIGDescribe("Conntrack", func() {
 RestartPolicy: v1.RestartPolicyNever,
 },
 }
+nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name}
+e2epod.SetNodeSelection(&pod.Spec, nodeSelection)

 fr.PodClient().CreateSync(pod)
 ginkgo.By("Client pod created")
@@ -153,7 +153,8 @@ var _ = common.SIGDescribe("Firewall rule", func() {
 fmt.Sprintf("--http-port=%d", firewallTestHTTPPort),
 fmt.Sprintf("--udp-port=%d", firewallTestUDPPort))
 pod.ObjectMeta.Labels = jig.Labels
-pod.Spec.NodeName = nodeName
+nodeSelection := e2epod.NodeSelection{Name: nodeName}
+e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
 pod.Spec.HostNetwork = true
 _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 framework.ExpectNoError(err)
@@ -54,7 +54,8 @@ func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host str
 pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil)
 pod.Spec.Containers[0].Command = command
 pod.Spec.Containers[0].Args = nil // otherwise 'pause` is magically an argument to nc, which causes all hell to break loose
-pod.Spec.NodeName = nodeName
+nodeSelection := e2epod.NodeSelection{Name: nodeName}
+e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
 pod.Spec.RestartPolicy = v1.RestartPolicyNever

 podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"

 "k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/network/common"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -73,7 +74,8 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {

 for _, node := range nodes.Items {
 // target Pod at Node
-testPod.Spec.NodeName = node.Name
+nodeSelection := e2epod.NodeSelection{Name: node.Name}
+e2epod.SetNodeSelection(&testPod.Spec, nodeSelection)
 _, err = pc.Create(context.TODO(), &testPod, metav1.CreateOptions{})
 framework.ExpectNoError(err)
 }
@@ -1982,7 +1982,8 @@ var _ = common.SIGDescribe("Services", func() {

 ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName))
 execPod := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
-pod.Spec.NodeName = nodeName
+nodeSelection := e2epod.NodeSelection{Name: nodeName}
+e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
 })

 serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
@@ -2936,7 +2937,8 @@ var _ = common.SIGDescribe("SCTP [Feature:SCTP] [LinuxOnly]", func() {
 podName := "hostport"
 ports := []v1.ContainerPort{{Protocol: v1.ProtocolSCTP, ContainerPort: 5060, HostPort: 5060}}
 podSpec := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, ports)
-podSpec.Spec.NodeName = node.Name
+nodeSelection := e2epod.NodeSelection{Name: node.Name}
+e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection)

 ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
 f.PodClient().CreateSync(podSpec)