mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 19:56:01 +00:00
fix crossbuild, verify container restarts, and restart only once
This commit is contained in:
parent 2ada6e62d5
commit e2718f3bc5
@@ -151,20 +151,10 @@ func containerGCTest(f *framework.Framework, test testRun) {
 		By("Making sure all containers restart the specified number of times")
 		Eventually(func() error {
 			for _, podSpec := range test.testPods {
-				updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podSpec.podName, metav1.GetOptions{})
+				err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
 				if err != nil {
 					return err
 				}
-				if len(updatedPod.Status.ContainerStatuses) != podSpec.numContainers {
-					return fmt.Errorf("expected pod %s to have %d containers, actual: %d",
-						updatedPod.Name, podSpec.numContainers, len(updatedPod.Status.ContainerStatuses))
-				}
-				for _, containerStatus := range updatedPod.Status.ContainerStatuses {
-					if containerStatus.RestartCount != podSpec.restartCount {
-						return fmt.Errorf("pod %s had container with restartcount %d. Should have been at least %d",
-							updatedPod.Name, containerStatus.RestartCount, podSpec.restartCount)
-					}
-				}
 			}
 			return nil
 		}, setupDuration, runtimePollInterval).Should(BeNil())
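Note: the hunk above (the node e2e container GC test, containerGCTest) collapses the inline status checks into a single call to verifyPodRestartCount, which this diff adds later at +316. Gomega's Eventually re-invokes the supplied function at every poll interval until it returns nil or the timeout expires, so a transient error from the helper just triggers another poll. A minimal sketch of that pattern, assuming a standard Gomega setup; waitForRestarts and its arguments are illustrative, not code from this commit:

	package example

	import (
		"time"

		. "github.com/onsi/gomega"
	)

	// waitForRestarts polls check until it returns nil or two minutes elapse.
	// Eventually's trailing arguments are the timeout and the poll interval.
	func waitForRestarts(check func() error) {
		Eventually(func() error {
			return check() // nil means every container hit the expected count
		}, 2*time.Minute, 10*time.Second).Should(BeNil())
	}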
@@ -292,7 +282,7 @@ func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
 		containers = append(containers, v1.Container{
 			Image: "gcr.io/google_containers/busybox:1.24",
 			Name:  spec.getContainerName(i),
-			Command: getRestartingContainerCommand("/test-empty-dir-mnt", i, int(spec.restartCount), ""),
+			Command: getRestartingContainerCommand("/test-empty-dir-mnt", i, spec.restartCount, ""),
 			VolumeMounts: []v1.VolumeMount{
 				{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
 			},
@@ -312,7 +302,7 @@ func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
 	return
 }
 
-func getRestartingContainerCommand(path string, containerNum, restarts int, loopingCommand string) []string {
+func getRestartingContainerCommand(path string, containerNum int, restarts int32, loopingCommand string) []string {
 	return []string{
 		"sh",
 		"-c",
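Typing restarts as int32 here matches v1.ContainerStatus.RestartCount, which the Kubernetes API declares as int32; that is what lets the previous hunk drop the int(spec.restartCount) conversion. A small sketch of the type relationship (standalone, using the modern import path; this commit's tree vendored the API types differently):

	package example

	import v1 "k8s.io/api/core/v1"

	// RestartCount is int32 in the API types, so an int32 parameter lets
	// callers pass it straight through without an int(...) conversion.
	func restartCountOf(status v1.ContainerStatus) int32 {
		return status.RestartCount
	}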
@@ -326,3 +316,21 @@ func getRestartingContainerCommand(path string, containerNum, restarts int, loopingCommand string) []string {
 		path, strconv.Itoa(containerNum), restarts+1, loopingCommand),
 	}
 }
+
+func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error {
+	updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if len(updatedPod.Status.ContainerStatuses) != expectedNumContainers {
+		return fmt.Errorf("expected pod %s to have %d containers, actual: %d",
+			updatedPod.Name, expectedNumContainers, len(updatedPod.Status.ContainerStatuses))
+	}
+	for _, containerStatus := range updatedPod.Status.ContainerStatuses {
+		if containerStatus.RestartCount != expectedRestartCount {
+			return fmt.Errorf("pod %s had container with restartcount %d. Should have been at least %d",
+				updatedPod.Name, containerStatus.RestartCount, expectedRestartCount)
+		}
+	}
+	return nil
+}
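One wrinkle worth noting: the new helper enforces an exact restart count (the != comparison) even though its error text says "at least". If at-least semantics were actually intended, the comparison would look like this hypothetical variant, which is not part of this commit:

	if containerStatus.RestartCount < expectedRestartCount {
		return fmt.Errorf("pod %s had container with restartcount %d; want at least %d",
			updatedPod.Name, containerStatus.RestartCount, expectedRestartCount)
	}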
@@ -35,8 +35,6 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const restartCount = 3
-
 var _ = framework.KubeDescribe("Summary API", func() {
 	f := framework.NewDefaultFramework("summary-test")
 	Context("when querying /stats/summary", func() {
@@ -55,10 +53,22 @@ var _ = framework.KubeDescribe("Summary API", func() {
 		const pod1 = "stats-busybox-1"
 
 		By("Creating test pods")
-		pods := getSummaryTestPods(f, pod0, pod1)
+		numRestarts := int32(1)
+		pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
 		f.PodClient().CreateBatch(pods)
-		// Wait for cAdvisor to collect 2 stats points, and for pods to restart
-		time.Sleep(45 * time.Second)
+
+		Eventually(func() error {
+			for _, pod := range pods {
+				err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}, time.Minute, 5*time.Second).Should(BeNil())
+
+		// Wait for cAdvisor to collect 2 stats points
+		time.Sleep(15 * time.Second)
 
 		// Setup expectations.
 		const (
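This hunk is the behavioral core of the commit title's "verify container restarts": instead of one blind 45-second sleep covering both the restarts and cAdvisor's sampling, the restarts are now polled explicitly (up to a minute, every five seconds) and only the stats warm-up remains a sleep. Fifteen seconds suffices for two samples if consecutive cAdvisor housekeeping passes land about ten seconds apart, the kubelet default at the time. A sketch of that arithmetic; statsWarmup is an illustrative helper, not code from this commit:

	package example

	import "time"

	// statsWarmup returns a sleep long enough for n cAdvisor samples,
	// assuming consecutive samples are one housekeeping interval apart:
	// n samples span (n-1) intervals, plus half an interval of slack.
	func statsWarmup(n int, housekeeping time.Duration) time.Duration {
		return time.Duration(n-1)*housekeeping + housekeeping/2
	}

	// statsWarmup(2, 10*time.Second) == 15s, the sleep used above.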
@@ -134,8 +144,8 @@ var _ = framework.KubeDescribe("Summary API", func() {
 				"StartTime": recent(maxStartAge),
 				"CPU": ptrMatchAllFields(gstruct.Fields{
 					"Time": recent(maxStatsAge),
-					"UsageNanoCores":       bounded(100000, 1000000000),
-					"UsageCoreNanoSeconds": bounded(10000000, 100000000000),
+					"UsageNanoCores":       bounded(100000, 1E9),
+					"UsageCoreNanoSeconds": bounded(10000000, 1E11),
 				}),
 				"Memory": ptrMatchAllFields(gstruct.Fields{
 					"Time": recent(maxStatsAge),
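A purely cosmetic change: 1E9 and 1E11 are untyped Go constants with exactly the values of the integer literals they replace, just easier to read. A standalone check (not from this commit):

	package main

	import "fmt"

	func main() {
		// Untyped float constants convert exactly when integral in value.
		fmt.Println(int64(1E9) == 1000000000)    // true
		fmt.Println(int64(1E11) == 100000000000) // true
	}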
@@ -262,7 +272,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
 	})
 })
 
-func getSummaryTestPods(f *framework.Framework, names ...string) []*v1.Pod {
+func getSummaryTestPods(f *framework.Framework, numRestarts int32, names ...string) []*v1.Pod {
 	pods := make([]*v1.Pod, 0, len(names))
 	for _, name := range names {
 		pods = append(pods, &v1.Pod{
@@ -275,7 +285,7 @@ func getSummaryTestPods(f *framework.Framework, names ...string) []*v1.Pod {
 			{
 				Name:  "busybox-container",
 				Image: "gcr.io/google_containers/busybox:1.24",
-				Command: getRestartingContainerCommand("/test-empty-dir-mnt", 0, restartCount, "ping -c 1 google.com; echo 'hello world' >> /test-empty-dir-mnt/file"),
+				Command: getRestartingContainerCommand("/test-empty-dir-mnt", 0, numRestarts, "ping -c 1 google.com; echo 'hello world' >> /test-empty-dir-mnt/file"),
 				Resources: v1.ResourceRequirements{
 					Limits: v1.ResourceList{
 						// Must set memory limit to get MemoryStats.AvailableBytes
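With the package-level restartCount constant removed earlier in the diff, the restart count is now threaded explicitly from the test body into the pod specs, so the Summary API test and the GC test no longer share a hidden tunable. A usage sketch, assuming the surrounding framework setup from the test:

	// Inside the It block, after the framework f has been created:
	numRestarts := int32(1)
	pods := getSummaryTestPods(f, numRestarts, "stats-busybox-0", "stats-busybox-1")
	f.PodClient().CreateBatch(pods)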