Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Merge pull request #130985 from kannon92/revert-129574-imagefs-tests
Revert "Separate SeparateDiskTests from eviction"
This commit is contained in: commit 4288af4dc8
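
The revert removes a table-driven helper from the node eviction e2e tests, restores the earlier inline ginkgo.Context blocks, and deletes the SeparateDisk test file that depended on that helper. As a readability aid, here is a condensed sketch of the removed helper, reconstructed from the hunks below; it is not a standalone file and leans on helpers from the surrounding e2e_node package (runEvictionTest, podEvictSpec, tempSetCurrentKubeletConfig, logInodeMetrics):

    // Removed by this revert (condensed from the hunks below).
    type EvictionTestConfig struct {
        Signal                  string
        PressureTimeout         time.Duration
        ExpectedNodeCondition   v1.NodeConditionType
        ExpectedStarvedResource v1.ResourceName
        IsHardEviction          bool   // true for hard eviction, false for soft eviction
        ResourceGetter          func(summary *kubeletstatsv1alpha1.Summary) uint64
        ResourceThreshold       uint64 // consumed resources that trigger eviction
        ThresholdPercentage     string // either uint64 or percentage
        EvictionGracePeriod     string // used for soft eviction
        MetricsLogger           func(ctx context.Context)
    }

    // Each test passed one config; testRunner turned it into either EvictionHard or
    // EvictionSoft kubelet settings and then delegated to runEvictionTest. For example:
    var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
        testRunner(
            framework.NewDefaultFramework("inode-eviction-test"),
            EvictionTestConfig{
                Signal:                  string(evictionapi.SignalNodeFsInodesFree),
                PressureTimeout:         15 * time.Minute,
                ExpectedNodeCondition:   v1.NodeDiskPressure,
                ExpectedStarvedResource: resourceInodes,
                IsHardEviction:          true,
                ResourceThreshold:       uint64(200000), // inodes consumed
                MetricsLogger:           logInodeMetrics,
                ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
                    return *summary.Node.Fs.InodesFree
                },
            },
            []podEvictSpec{ /* pods under test, see the hunks below */ })
    })

After the revert, the same InodeEviction case sets initialConfig.EvictionHard inline inside tempSetCurrentKubeletConfig, as the hunks below show.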
@@ -451,10 +451,6 @@ var (
     // TODO: remove when SELinuxMount feature gate is enabled by default.
     SELinuxMountReadWriteOncePodOnly = framework.WithFeature(framework.ValidFeatures.Add("SELinuxMountReadWriteOncePodOnly"))
-
-    // SeparateDiskTest (SIG-node, used for testing separate container runtime filesystem)
-    // The tests need separate disk settings on nodes and separate filesystems in storage.conf
-    SeparateDisk = framework.WithFeature(framework.ValidFeatures.Add("SeparateDisk"))
 
     // Owner: sig-network
     // Marks tests of KEP-1880 that require the `MultiCIDRServiceAllocator` feature gate
     // and the networking.k8s.io/v1alpha1 API.
@@ -20,7 +20,6 @@ import (
     "context"
     "fmt"
     "path/filepath"
-    "regexp"
     "strconv"
     "strings"
     "time"
@@ -69,80 +68,27 @@ const (
     noStarvedResource = v1.ResourceName("none")
 )
-
-type EvictionTestConfig struct {
-    Signal                  string
-    PressureTimeout         time.Duration
-    ExpectedNodeCondition   v1.NodeConditionType
-    ExpectedStarvedResource v1.ResourceName
-    IsHardEviction          bool   // true for hard eviction, false for soft eviction
-    ResourceGetter          func(summary *kubeletstatsv1alpha1.Summary) uint64 // Gets available resources (bytes, inodes, etc.)
-    ResourceThreshold       uint64 // Consumed resources that trigger eviction
-    ThresholdPercentage     string // either uint64 or percentage
-    EvictionGracePeriod     string // Used for soft eviction
-    MetricsLogger           func(ctx context.Context)
-}
-
-func testRunner(f *framework.Framework, config EvictionTestConfig, specs []podEvictSpec) {
-
-    f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
-
-    ginkgo.Context(fmt.Sprintf(testContextFmt, config.ExpectedNodeCondition), func() {
-        tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
-            summary := eventuallyGetSummary(ctx)
-            available := config.ResourceGetter(summary)
-
-            if config.ThresholdPercentage == "" && available <= config.ResourceThreshold {
-                e2eskipper.Skipf("Too few resources free on the host for the eviction test to run")
-            }
-
-            var thresholdValue string
-            if config.ThresholdPercentage != "" {
-                thresholdValue = config.ThresholdPercentage
-            } else {
-                thresholdValue = fmt.Sprintf("%d", available-config.ResourceThreshold)
-            }
-
-            if config.IsHardEviction {
-                initialConfig.EvictionHard = map[string]string{config.Signal: thresholdValue}
-            } else {
-                initialConfig.EvictionSoft = map[string]string{config.Signal: thresholdValue}
-                initialConfig.EvictionSoftGracePeriod = map[string]string{config.Signal: config.EvictionGracePeriod}
-                initialConfig.EvictionMaxPodGracePeriod = 30
-            }
-
-            // Add any special overrides for specific tests
-            initialConfig.EvictionMinimumReclaim = map[string]string{}
-
-            // Ensure that pods are not evicted because of the eviction-hard threshold
-            // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
-            if !config.IsHardEviction {
-                initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
-            }
-        })
-
-        runEvictionTest(f, config.PressureTimeout, config.ExpectedNodeCondition,
-            config.ExpectedStarvedResource, config.MetricsLogger, specs)
-    })
-}
 
 // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
 // Node disk pressure is induced by consuming all inodes on the node.
 var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
-    testRunner(
-        framework.NewDefaultFramework("inode-eviction-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalNodeFsInodesFree),
-            PressureTimeout:         15 * time.Minute,
-            ExpectedNodeCondition:   v1.NodeDiskPressure,
-            ExpectedStarvedResource: resourceInodes,
-            IsHardEviction:          true,
-            ResourceThreshold:       uint64(200000), // Inodes consumed
-            MetricsLogger:           logInodeMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                return *summary.Node.Fs.InodesFree
-            },
-        },
-        []podEvictSpec{
+    f := framework.NewDefaultFramework("inode-eviction-test")
+    f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+    expectedNodeCondition := v1.NodeDiskPressure
+    expectedStarvedResource := resourceInodes
+    pressureTimeout := 15 * time.Minute
+    inodesConsumed := uint64(200000)
+    ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+        tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+            // Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
+            summary := eventuallyGetSummary(ctx)
+            inodesFree := *summary.Node.Fs.InodesFree
+            if inodesFree <= inodesConsumed {
+                e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
+            }
+            initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsInodesFree): fmt.Sprintf("%d", inodesFree-inodesConsumed)}
+            initialConfig.EvictionMinimumReclaim = map[string]string{}
+        })
+        runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logInodeMetrics, []podEvictSpec{
             {
                 evictionPriority: 1,
                 // TODO(#127864): Container runtime may not immediate free up the resources after the pod eviction,
@@ -154,6 +100,7 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
                 pod: innocentPod(),
             },
         })
+    })
 })
 
 // ImageGCNoEviction tests that the eviction manager is able to prevent eviction
@@ -280,32 +227,41 @@ var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.With
 // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
 // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
 var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
-    diskConsumed := resource.MustParse("4Gi")
-    testRunner(
-        framework.NewDefaultFramework("localstorage-eviction-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalNodeFsAvailable),
-            PressureTimeout:         10 * time.Minute,
-            ExpectedNodeCondition:   v1.NodeDiskPressure,
-            ExpectedStarvedResource: v1.ResourceEphemeralStorage,
-            ResourceThreshold:       uint64(diskConsumed.Value()), // local storage
-            IsHardEviction:          false,
-            EvictionGracePeriod:     "1m",
-            MetricsLogger:           logDiskMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                return *summary.Node.Fs.AvailableBytes
-            },
-        },
-        []podEvictSpec{
+    f := framework.NewDefaultFramework("localstorage-eviction-test")
+    f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+    pressureTimeout := 10 * time.Minute
+    expectedNodeCondition := v1.NodeDiskPressure
+    expectedStarvedResource := v1.ResourceEphemeralStorage
+    ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+        tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+            diskConsumed := resource.MustParse("4Gi")
+            summary := eventuallyGetSummary(ctx)
+            availableBytes := *(summary.Node.Fs.AvailableBytes)
+            if availableBytes <= uint64(diskConsumed.Value()) {
+                e2eskipper.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run")
+            }
+            initialConfig.EvictionSoft = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
+            initialConfig.EvictionSoftGracePeriod = map[string]string{string(evictionapi.SignalNodeFsAvailable): "1m"}
+            // Defer to the pod default grace period
+            initialConfig.EvictionMaxPodGracePeriod = 30
+            initialConfig.EvictionMinimumReclaim = map[string]string{}
+            // Ensure that pods are not evicted because of the eviction-hard threshold
+            // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
+            initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
+        })
+        runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
             {
                 evictionPriority: 1,
-                pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
+                // TODO(#127864): Container runtime may not immediate free up the resources after the pod eviction,
+                // causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
+                pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
             },
             {
                 evictionPriority: 0,
                 pod: innocentPod(),
             },
         })
+    })
 })
 
 var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.Eviction, func() {
@@ -348,28 +304,20 @@ var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodS
 
 // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
 var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, feature.Eviction, func() {
-    sizeLimit := resource.MustParse("40Mi")
-    useOverLimit := 41 /* Mb */
-    useUnderLimit := 39 /* Mb */
-    containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit}
+    f := framework.NewDefaultFramework("localstorage-eviction-test")
+    f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+    evictionTestTimeout := 10 * time.Minute
+    ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
+        tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+            // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
+            initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
+        })
+        sizeLimit := resource.MustParse("100Mi")
+        useOverLimit := 101 /* Mb */
+        useUnderLimit := 99 /* Mb */
+        containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit}
 
-    testRunner(
-        framework.NewDefaultFramework("localstorage-eviction-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalMemoryAvailable),
-            PressureTimeout:         10 * time.Minute,
-            ExpectedNodeCondition:   noPressure,
-            ExpectedStarvedResource: noStarvedResource,
-            IsHardEviction:          true,
-            ThresholdPercentage:     "0%", // Disabling this threshold to focus on pod-level limits
-            MetricsLogger:           logDiskMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                // We're not using node-level resource checks for this test
-                // Just need a non-zero value to pass the resource check
-                return 1024 * 1024 * 1024 // 1 GB (arbitrary non-zero value)
-            },
-        },
-        []podEvictSpec{
+        runEvictionTest(f, evictionTestTimeout, noPressure, noStarvedResource, logDiskMetrics, []podEvictSpec{
             {
                 evictionPriority: 1, // This pod should be evicted because emptyDir (default storage type) usage violation
                 pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{
@@ -402,6 +350,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(
                 pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
             },
         })
+    })
 })
 
 // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
@@ -640,19 +589,6 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
             // Nodes do not immediately report local storage capacity
             // Sleep so that pods requesting local storage do not fail to schedule
             time.Sleep(30 * time.Second)
-            // Check for Pressure
-            ginkgo.By("make sure node has no pressure before starting")
-            gomega.Eventually(ctx, func(ctx context.Context) error {
-                if expectedNodeCondition == noPressure || !hasNodeCondition(ctx, f, expectedNodeCondition) {
-                    return nil
-                }
-                return fmt.Errorf("NodeCondition: %s encountered", expectedNodeCondition)
-            }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.Succeed())
-
-            // prepull images only if its image-gc-eviction-test
-            if regexp.MustCompile(`(?i)image-gc.*`).MatchString(f.BaseName) {
-                gomega.Expect(PrePullAllImages(ctx)).Should(gomega.Succeed())
-            }
             ginkgo.By("setting up pods to be used by tests")
             pods := []*v1.Pod{}
             for _, spec := range testSpecs {
@@ -720,23 +656,10 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
             }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.Succeed())
 
             ginkgo.By("checking for correctly formatted eviction events")
-            gomega.Eventually(ctx, func(ctx context.Context) error {
-                return verifyEvictionEvents(ctx, f, testSpecs, expectedStarvedResource)
-            }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.Succeed())
+            verifyEvictionEvents(ctx, f, testSpecs, expectedStarvedResource)
         })
 
         ginkgo.AfterEach(func(ctx context.Context) {
-            prePullImagesIfNeccecary := func() {
-                if framework.TestContext.PrepullImages {
-                    // The disk eviction test may cause the prepulled images to be evicted,
-                    // prepull those images again to ensure this test not affect following tests.
-                    err := PrePullAllImages(ctx)
-                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-                }
-            }
-            // Run prePull using a defer to make sure it is executed even when the assertions below fails
-            defer prePullImagesIfNeccecary()
-
             ginkgo.By("deleting pods")
             for _, spec := range testSpecs {
                 ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
@@ -887,7 +810,7 @@ func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs
     }
 }
 
-func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) error {
+func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) {
     for _, spec := range testSpecs {
         pod := spec.pod
         if spec.evictionPriority != 0 {
@@ -901,22 +824,24 @@ func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs
             framework.ExpectNoError(err, "getting events")
             gomega.Expect(podEvictEvents.Items).To(gomega.HaveLen(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
             event := podEvictEvents.Items[0]
+
             if expectedStarvedResource != noStarvedResource {
                 // Check the eviction.StarvedResourceKey
                 starved, found := event.Annotations[eviction.StarvedResourceKey]
                 if !found {
-                    return fmt.Errorf("Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+                    framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
                         pod.Name, expectedStarvedResource)
                 }
                 starvedResource := v1.ResourceName(starved)
                 gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
                     pod.Name, expectedStarvedResource, starvedResource)
+
                 // We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
                 if expectedStarvedResource == v1.ResourceMemory {
                     // Check the eviction.OffendingContainersKey
                     offendersString, found := event.Annotations[eviction.OffendingContainersKey]
                     if !found {
-                        return fmt.Errorf("Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+                        framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
                             pod.Name)
                     }
                     offendingContainers := strings.Split(offendersString, ",")
@@ -928,7 +853,7 @@ func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs
                     // Check the eviction.OffendingContainersUsageKey
                     offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
                     if !found {
-                        return fmt.Errorf("Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
+                        framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
                             pod.Name)
                     }
                     offendingContainersUsage := strings.Split(offendingUsageString, ",")
@@ -943,7 +868,6 @@ func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs
             }
         }
     }
-    return nil
 }
 
 // Returns TRUE if the node has the node condition, FALSE otherwise
@@ -1,193 +0,0 @@
-/*
-Copyright 2024 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package e2enode
-
-import (
-    "context"
-    "time"
-
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-    kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-    evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
-    "k8s.io/kubernetes/test/e2e/feature"
-    "k8s.io/kubernetes/test/e2e/framework"
-
-    "github.com/onsi/gomega"
-)
-
-// Eviction Policy is described here:
-// https://github.com/kubernetes/design-proposals-archive/blob/main/node/kubelet-eviction.md
-// Stats is best effort and we evict based on stats being successful
-
-// Container runtime filesystem should display different stats for imagefs and nodefs
-var _ = SIGDescribe("Summary", feature.SeparateDisk, func() {
-    f := framework.NewDefaultFramework("summary-test")
-    f.It("should display different stats for imagefs and nodefs", func(ctx context.Context) {
-        summary := eventuallyGetSummary(ctx)
-        // Available and Capacity are the most useful to tell difference
-        gomega.Expect(summary.Node.Fs.AvailableBytes).ToNot(gomega.Equal(summary.Node.Runtime.ImageFs.AvailableBytes))
-        gomega.Expect(summary.Node.Fs.CapacityBytes).ToNot(gomega.Equal(summary.Node.Runtime.ImageFs.CapacityBytes))
-
-    })
-})
-
-// Node disk pressure is induced by consuming all inodes on the Writeable Layer (imageFS).
-var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.SeparateDisk, func() {
-    testRunner(
-        framework.NewDefaultFramework("inode-eviction-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalImageFsInodesFree),
-            PressureTimeout:         15 * time.Minute,
-            ExpectedNodeCondition:   v1.NodeDiskPressure,
-            ExpectedStarvedResource: resourceInodes,
-            ResourceThreshold:       uint64(200000), // Inodes consumed
-            IsHardEviction:          true,
-            MetricsLogger:           logInodeMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                return *(summary.Node.Runtime.ImageFs.InodesFree)
-            },
-        },
-        []podEvictSpec{
-            {
-                evictionPriority: 1,
-                pod: inodeConsumingPod("container-inode-hog", lotsOfFiles, nil),
-            },
-            {
-                evictionPriority: 0,
-                pod: innocentPod(),
-            },
-        })
-})
-
-// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
-// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
-// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
-var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.SeparateDisk, func() {
-    diskConsumed := resource.MustParse("4Gi")
-    testRunner(
-        framework.NewDefaultFramework("local-storage-imagefs-soft-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalImageFsAvailable),
-            PressureTimeout:         10 * time.Minute,
-            ExpectedNodeCondition:   v1.NodeDiskPressure,
-            ExpectedStarvedResource: v1.ResourceEphemeralStorage,
-            ResourceThreshold:       uint64(diskConsumed.Value()), // local storage
-            IsHardEviction:          false,
-            EvictionGracePeriod:     "1m",
-            MetricsLogger:           logDiskMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                return *summary.Node.Runtime.ImageFs.AvailableBytes
-            },
-        },
-        []podEvictSpec{
-            {
-                evictionPriority: 1,
-                pod: diskConsumingPod("best-effort-disk", lotsOfDisk, nil, v1.ResourceRequirements{}),
-            },
-            {
-                evictionPriority: 0,
-                pod: innocentPod(),
-            },
-        })
-})
-
-// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
-// removed localstoragecapacityisolation feature gate here as its not a feature gate anymore
-var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.SeparateDisk, func() {
-    sizeLimit := resource.MustParse("40Mi")
-    useOverLimit := 41 /* Mb */
-    useUnderLimit := 39 /* Mb */
-    containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit}
-
-    testRunner(
-        framework.NewDefaultFramework("localstorage-eviction-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalMemoryAvailable),
-            PressureTimeout:         10 * time.Minute,
-            ExpectedNodeCondition:   noPressure,
-            ExpectedStarvedResource: noStarvedResource,
-            IsHardEviction:          true,
-            ThresholdPercentage:     "0%", // Disabling this threshold to focus on pod-level limits
-            MetricsLogger:           logDiskMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                // We're not using node-level resource checks for this test
-                // Just need a non-zero value to pass the resource check
-                return 1024 * 1024 * 1024 // 1 GB (arbitrary non-zero value)
-            },
-        },
-        []podEvictSpec{
-            {
-                evictionPriority: 1, // This pod should be evicted because emptyDir (default storage type) usage violation
-                pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{
-                    EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
-                }, v1.ResourceRequirements{}),
-            },
-            {
-                evictionPriority: 1, // This pod should cross the container limit by writing to its writable layer.
-                pod: diskConsumingPod("container-disk-limit", useOverLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
-            },
-            {
-                evictionPriority: 1, // This pod should hit the container limit by writing to an emptydir
-                pod: diskConsumingPod("container-emptydir-disk-limit", useOverLimit, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
-                    v1.ResourceRequirements{Limits: containerLimit}),
-            },
-            {
-                evictionPriority: 0, // This pod should not be evicted because MemoryBackedVolumes cannot use more space than is allocated to them since SizeMemoryBackedVolumes was enabled
-                pod: diskConsumingPod("emptydir-memory-sizelimit", useOverLimit, &v1.VolumeSource{
-                    EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory", SizeLimit: &sizeLimit},
-                }, v1.ResourceRequirements{}),
-            },
-            {
-                evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
-                pod: diskConsumingPod("emptydir-disk-below-sizelimit", useUnderLimit, &v1.VolumeSource{
-                    EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
-                }, v1.ResourceRequirements{}),
-            },
-            {
-                evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
-                pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
-            },
-        })
-})
-
-// ImageStorageVolumeEviction tests that the node responds to node disk pressure by evicting pods.
-// Volumes write to the node filesystem so we are testing eviction on nodefs even if it
-// exceeds imagefs limits.
-var _ = SIGDescribe("ImageStorageVolumeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.SeparateDisk, func() {
-    testRunner(
-        framework.NewDefaultFramework("exceed-nodefs-test"),
-        EvictionTestConfig{
-            Signal:                  string(evictionapi.SignalNodeFsAvailable),
-            PressureTimeout:         15 * time.Minute,
-            ExpectedNodeCondition:   v1.NodeDiskPressure,
-            ExpectedStarvedResource: v1.ResourceEphemeralStorage,
-            IsHardEviction:          true,
-            ThresholdPercentage:     "50%", // Use percentage instead of absolute threshold
-            MetricsLogger:           logDiskMetrics,
-            ResourceGetter: func(summary *kubeletstatsv1alpha1.Summary) uint64 {
-                return *summary.Node.Fs.AvailableBytes
-            },
-        },
-        []podEvictSpec{
-            {
-                evictionPriority: 1, // This pod should exceed disk capacity on nodefs since writing to a volume
-                pod: diskConsumingPod("container-emptydir-disk-limit", 16000, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
-                    v1.ResourceRequirements{}),
-            },
-        })
-})