Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-06 10:43:56 +00:00
Merge pull request #130391 from bart0sh/PR174-e2e_node-fix-eviction-kubetest2
e2e_node: fix ImageGCNoEviction test for kubetest2
Commit: b82260f003
@@ -103,8 +103,11 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
 	})
 })
 
-// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
-// Disk pressure is induced by pulling large images
+// ImageGCNoEviction tests that the eviction manager is able to prevent eviction
+// by reclaiming resources (inodes) through image garbage collection.
+// Disk pressure is induced by consuming a lot of inodes on the node.
+// Images are pre-pulled before running the test workload to ensure
+// that the image garbage collector can remove them to avoid eviction.
 var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
 	f := framework.NewDefaultFramework("image-gc-eviction-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
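The rewritten comment above says disk pressure is now induced by consuming a lot of inodes rather than by pulling large images. For illustration only (this is not the test's actual pod workload), a small Go sketch of why that works: every file, even an empty one, occupies an inode, so creating many small files drains nodefs inodes quickly.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical stand-in for the test workload: each empty file
	// consumes one inode on the filesystem backing dir.
	dir, err := os.MkdirTemp("", "inode-consumer")
	if err != nil {
		panic(err)
	}
	const n = 100000 // same order of magnitude as the test's inodesConsumed
	for i := 0; i < n; i++ {
		f, err := os.Create(filepath.Join(dir, fmt.Sprintf("file-%d", i)))
		if err != nil {
			panic(err)
		}
		f.Close()
	}
	fmt.Printf("created %d empty files (one inode each) under %s\n", n, dir)
}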
@@ -113,6 +116,17 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
 	expectedStarvedResource := resourceInodes
 	inodesConsumed := uint64(100000)
 	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+		prepull := func(ctx context.Context) {
+			// Prepull images for image garbage collector to remove them
+			// when reclaiming resources
+			err := PrePullAllImages(ctx)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		}
+		ginkgo.BeforeEach(prepull)
+		if framework.TestContext.PrepullImages {
+			ginkgo.AfterEach(prepull)
+		}
+
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
 			summary := eventuallyGetSummary(ctx)
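The added prepull helper is registered once for BeforeEach and, when the environment pre-pulls images, again for AfterEach, so the images the garbage collector reclaims during the test are restored for later suites. A minimal, self-contained Ginkgo sketch of that registration pattern, with hypothetical names (prepullImages, setup) standing in for framework.TestContext.PrepullImages and PrePullAllImages:

// shared_setup_test.go - sketch of reusing one closure for BeforeEach and a
// conditional AfterEach, as the hunk above does.
package example_test

import (
	"context"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// prepullImages is a hypothetical flag mirroring framework.TestContext.PrepullImages.
var prepullImages = true

func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "shared setup closure")
}

var _ = ginkgo.Describe("eviction-style suite", func() {
	// One closure does the expensive setup (image pre-pull in the real test).
	setup := func(ctx context.Context) {
		// ... e.g. pull the images the garbage collector is expected to reclaim
	}

	// Always run before each spec.
	ginkgo.BeforeEach(setup)

	// Re-run it after each spec too, so later tests still find the images,
	// but only when the environment pre-pulls images in the first place.
	if prepullImages {
		ginkgo.AfterEach(setup)
	}

	ginkgo.It("runs with the shared setup", func(ctx context.Context) {
		gomega.Expect(true).To(gomega.BeTrue())
	})
})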
@@ -120,6 +134,7 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
 			if inodesFree <= inodesConsumed {
 				e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
 			}
+			framework.Logf("Setting eviction threshold to %d inodes", inodesFree-inodesConsumed)
 			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsInodesFree): fmt.Sprintf("%d", inodesFree-inodesConsumed)}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
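The hard-eviction threshold set in this hunk is inodesFree - inodesConsumed, so a workload that consumes roughly inodesConsumed inodes is guaranteed to cross the nodefs.inodesFree signal. A standalone sketch with made-up numbers (the real inodesFree comes from eventuallyGetSummary):

package main

import "fmt"

func main() {
	// Hypothetical values; the real test reads inodesFree from the node's
	// stats summary and skips when inodesFree <= inodesConsumed.
	inodesFree := uint64(1500000)
	inodesConsumed := uint64(100000) // matches the constant in the test

	if inodesFree <= inodesConsumed {
		fmt.Println("too few inodes free, the test would be skipped")
		return
	}

	// "nodefs.inodesFree" is the string value of evictionapi.SignalNodeFsInodesFree.
	evictionHard := map[string]string{
		"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed),
	}
	fmt.Println(evictionHard) // map[nodefs.inodesFree:1400000]
}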
@@ -645,17 +660,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 		})
 
 		ginkgo.AfterEach(func(ctx context.Context) {
-			prePullImagesIfNeccecary := func() {
-				if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
-					// The disk eviction test may cause the prepulled images to be evicted,
-					// prepull those images again to ensure this test not affect following tests.
-					err := PrePullAllImages(ctx)
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-				}
-			}
-			// Run prePull using a defer to make sure it is executed even when the assertions below fails
-			defer prePullImagesIfNeccecary()
-
 			ginkgo.By("deleting pods")
 			for _, spec := range testSpecs {
 				ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
@@ -673,17 +677,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			reduceAllocatableMemoryUsageIfCgroupv1()
-			ginkgo.By("making sure we have all the required images for testing")
-			prePullImagesIfNeccecary()
-
-			// Ensure that the NodeCondition hasn't returned after pulling images
-			ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition))
-			gomega.Eventually(ctx, func(ctx context.Context) error {
-				if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
-					return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
-				}
-				return nil
-			}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 			ginkgo.By("making sure we can start a new pod after the test")
 			podName := "test-admit-pod"
@@ -924,16 +917,25 @@ func logDiskMetrics(ctx context.Context) {
 	if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
 		framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
 	}
+	if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesUsed != nil && summary.Node.Fs.InodesFree != nil {
+		framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesUsed: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesUsed, *summary.Node.Fs.InodesFree)
+	}
 	for _, pod := range summary.Pods {
 		framework.Logf("Pod: %s", pod.PodRef.Name)
 		for _, container := range pod.Containers {
-			if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
-				framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
+			if container.Rootfs != nil && container.Rootfs.UsedBytes != nil && container.Rootfs.AvailableBytes != nil {
+				framework.Logf("--- summary Container: %s UsedBytes: %d, AvailableBytes: %d", container.Name, *container.Rootfs.UsedBytes, *container.Rootfs.AvailableBytes)
+			}
+			if container.Rootfs != nil && container.Rootfs.Inodes != nil && container.Rootfs.InodesUsed != nil && container.Rootfs.InodesFree != nil {
+				framework.Logf("--- summary Container: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", container.Name, *container.Rootfs.Inodes, *container.Rootfs.InodesUsed, *container.Rootfs.InodesFree)
 			}
 		}
 		for _, volume := range pod.VolumeStats {
-			if volume.FsStats.InodesUsed != nil {
-				framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
+			if volume.FsStats.UsedBytes != nil && volume.FsStats.AvailableBytes != nil {
+				framework.Logf("--- summary Volume: %s UsedBytes: %d, AvailableBytes: %d", volume.Name, *volume.FsStats.UsedBytes, *volume.FsStats.AvailableBytes)
+			}
+			if volume.FsStats.Inodes != nil && volume.FsStats.InodesUsed != nil && volume.FsStats.InodesFree != nil {
+				framework.Logf("--- summary Volume: %s Inodes: %d, InodesUsed: %d, InodesFree: %d", volume.Name, *volume.FsStats.Inodes, *volume.FsStats.InodesUsed, *volume.FsStats.InodesFree)
 			}
 		}
 	}