Mirror of https://github.com/k3s-io/kubernetes.git
Fixes to node shutdown e2e test
- The test was failing because the busybox container ran `sleep infinity`, which sent it into a crash loop: the `sleep` shipped with busybox does not support `infinity`, so replace it with a `while true; do sleep 5; done` loop.
- Replace the dbus message emission from `gdbus` with `dbus-send`. The test was failing on Ubuntu, which does not have gdbus installed; `dbus-send` is installed on both COS and Ubuntu, so use it instead.
- Replace the check of the pod phase with the test util function `PodRunningReady`, which checks both the phase and the pod ready condition (see the sketch below).
- Add some more verbose logging to ease future debugging.
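For context on the `PodRunningReady` point: the helper (from k8s.io/kubernetes/test/utils) succeeds only when a pod is both in the Running phase and reporting a True Ready condition, whereas the old check looked at the phase alone. A minimal hand-rolled sketch of that check follows; the function name and error wording here are illustrative, not the real implementation:

package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// podRunningReady sketches what testutils.PodRunningReady verifies:
// the phase alone is not enough, the pod's Ready condition must also
// be True. A pod whose containers are up but failing readiness probes
// passes a bare phase check and fails this one.
func podRunningReady(p *v1.Pod) (bool, error) {
	if p.Status.Phase != v1.PodRunning {
		return false, fmt.Errorf("pod %q: want phase %q, got %q", p.Name, v1.PodRunning, p.Status.Phase)
	}
	for _, c := range p.Status.Conditions {
		if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
			return true, nil
		}
	}
	return false, fmt.Errorf("pod %q is running but its Ready condition is not True", p.Name)
}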
Parent: f63cac6cdf
Commit: bd2e557b25
@@ -28,13 +28,15 @@ import (

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/test/e2e/framework"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-	"k8s.io/kubernetes/test/e2e/framework"
+	testutils "k8s.io/kubernetes/test/utils"
 )

 var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNodeShutdown]", func() {
@@ -86,18 +88,18 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNod
 			framework.ExpectNoError(err)
 			framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")

+			ginkgo.By("Verifying batch pods are running")
 			for _, pod := range list.Items {
-				framework.ExpectEqual(
-					pod.Status.Phase,
-					v1.PodRunning,
-					"pod is not ready",
-				)
+				if podReady, err := testutils.PodRunningReady(&pod); err != nil || !podReady {
+					framework.Failf("Failed to start batch pod: %v", pod.Name)
+				}
 			}

 			ginkgo.By("Emitting shutdown signal")
 			err = emitSignalPrepareForShutdown(true)
 			framework.ExpectNoError(err)

+			ginkgo.By("Verifying that non-critical pods are shutdown")
 			// Not critical pod should be shutdown
 			gomega.Eventually(func() error {
 				list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{
@@ -111,10 +113,12 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNod
 				for _, pod := range list.Items {
 					if kubelettypes.IsCriticalPod(&pod) {
 						if pod.Status.Phase != v1.PodRunning {
+							framework.Logf("Expecting critical pod to be running, but it's not currently. Pod: %q, Pod Status Phase: %q, Pod Status Reason: %q", pod.Name, pod.Status.Phase, pod.Status.Reason)
 							return fmt.Errorf("critical pod should not be shutdown, phase: %s", pod.Status.Phase)
 						}
 					} else {
 						if pod.Status.Phase != v1.PodFailed || pod.Status.Reason != "Shutdown" {
+							framework.Logf("Expecting non-critical pod to be shutdown, but it's not currently. Pod: %q, Pod Status Phase: %q, Pod Status Reason: %q", pod.Name, pod.Status.Phase, pod.Status.Reason)
 							return fmt.Errorf("pod should be shutdown, phase: %s", pod.Status.Phase)
 						}
 					}
@@ -122,6 +126,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNod
 				return nil
 			}, podStatusUpdateTimeout, pollInterval).Should(gomega.BeNil())

+			ginkgo.By("Verifying that all pods are shutdown")
 			// All pod should be shutdown
 			gomega.Eventually(func() error {
 				list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{
@@ -134,6 +139,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNod

 				for _, pod := range list.Items {
 					if pod.Status.Phase != v1.PodFailed || pod.Status.Reason != "Shutdown" {
+						framework.Logf("Expecting pod to be shutdown, but it's not currently: Pod: %q, Pod Status Phase: %q, Pod Status Reason: %q", pod.Name, pod.Status.Phase, pod.Status.Reason)
 						return fmt.Errorf("pod should be shutdown, phase: %s", pod.Status.Phase)
 					}
 				}
@@ -188,10 +194,10 @@ func getGracePeriodOverrideTestPod(name string, node string, gracePeriod int64,
 					Args: []string{`
 _term() {
 	echo "Caught SIGTERM signal!"
-	sleep infinity
+	while true; do sleep 5; done
 }
 trap _term SIGTERM
-sleep infinity
+while true; do sleep 5; done
 `},
 				},
 			},
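For reference, here is a minimal sketch of how such a busybox-compatible idle loop is wired into a container spec (a hypothetical standalone helper, not the full getGracePeriodOverrideTestPod from the test): busybox's `sleep` accepts only numeric durations, so `sleep infinity` exits with an error and crash-loops the container.

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// idleContainer idles in a busybox-compatible way. The SIGTERM handler
// also loops rather than exiting, so the pod keeps running through the
// graceful node shutdown grace period after the signal is delivered.
func idleContainer() v1.Container {
	return v1.Container{
		Name:    "busybox-idle",
		Image:   "busybox",
		Command: []string{"sh", "-c"},
		Args: []string{`
_term() {
	echo "Caught SIGTERM signal!"
	while true; do sleep 5; done
}
trap _term SIGTERM
while true; do sleep 5; done
`},
	}
}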
@@ -214,7 +220,7 @@ sleep infinity

 // Emits a fake PrepareForShutdown dbus message on system dbus. Will cause kubelet to react to an active shutdown event.
 func emitSignalPrepareForShutdown(b bool) error {
-	cmd := "gdbus emit --system --object-path /org/freedesktop/login1 --signal org.freedesktop.login1.Manager.PrepareForShutdown " + strconv.FormatBool(b)
+	cmd := "dbus-send --system /org/freedesktop/login1 org.freedesktop.login1.Manager.PrepareForShutdown boolean:" + strconv.FormatBool(b)
 	_, err := runCommand("sh", "-c", cmd)
 	return err
 }
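And a standalone sketch of that signal emission (hypothetical helper name; the test itself routes the same command through its runCommand wrapper): dbus-send is present on both COS and Ubuntu node images, while gdbus is not guaranteed to be installed.

package sketch

import (
	"os/exec"
	"strconv"
)

// emitPrepareForShutdown emits a fake logind PrepareForShutdown signal
// on the system bus, which the kubelet treats as an active shutdown.
func emitPrepareForShutdown(active bool) error {
	cmd := "dbus-send --system /org/freedesktop/login1 " +
		"org.freedesktop.login1.Manager.PrepareForShutdown boolean:" +
		strconv.FormatBool(active)
	return exec.Command("sh", "-c", cmd).Run()
}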