Merge pull request #119362 from pacoxu/add-new-eviction-pid-test

Add a new e2e test, with PodAndContainerStatsFromCRI enabled, for PID eviction order.

Commit: 08aefc8a92
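For context: per the comment in the diff below, the PodAndContainerStatsFromCRI feature gate makes the kubelet take pod and container stats (including PID counts) from the CRI runtime instead of cAdvisor, which is why the test runs the PID-eviction scenario with the gate both on and off. A minimal sketch of flipping the gate on a kubelet configuration, assuming the staging k8s.io/kubelet/config/v1beta1 API (the test itself mutates the config through the tempSetCurrentKubeletConfig helper):

package main

import (
    "fmt"

    kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

func main() {
    cfg := &kubeletconfigv1beta1.KubeletConfiguration{}

    // FeatureGates is a plain map[string]bool and is nil on a zero-value
    // config, so it must be initialized before the first write; the test
    // in this diff does the same nil check.
    if cfg.FeatureGates == nil {
        cfg.FeatureGates = make(map[string]bool)
    }
    cfg.FeatureGates["PodAndContainerStatsFromCRI"] = true

    fmt.Println(cfg.FeatureGates["PodAndContainerStatsFromCRI"]) // true
}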
@@ -477,13 +477,22 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 	highPriority := int32(999999999)
 	processes := 30000
 
-	ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
+	// if criStats is true, PodAndContainerStatsFromCRI will use data from cri instead of cadvisor for kubelet to get pid count of pods
+	for _, criStats := range []bool{true, false} {
+		ginkgo.Context(fmt.Sprintf("when we run containers with PodAndContainerStatsFromCRI=%v that should cause %s", criStats, expectedNodeCondition), func() {
 			tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 				pidsConsumed := int64(10000)
 				summary := eventuallyGetSummary(ctx)
 				availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
 				initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
 				initialConfig.EvictionMinimumReclaim = map[string]string{}
+				if initialConfig.FeatureGates == nil {
+					initialConfig.FeatureGates = make(map[string]bool)
+				}
+				if criStats {
+					initialConfig.FeatureGates["PodAndContainerStatsFromCRI"] = true
+				}
+
 			})
 			ginkgo.BeforeEach(func(ctx context.Context) {
 				_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
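The hard-eviction threshold in this hunk is derived from live node data: the test reads the node's PID rlimit and current process count from the summary API, then sets the pid.available signal so that only pidsConsumed (10000) PIDs of headroom remain. A pod that spawns processes = 30000 forks is therefore guaranteed to cross the threshold. A minimal standalone sketch of the same arithmetic, with made-up numbers standing in for the summary values:

package main

import "fmt"

func main() {
    // Hypothetical values standing in for summary.Node.Rlimit on a test node.
    maxPID := int64(4194304)       // kernel pid_max
    runningProcesses := int64(700) // processes already running on the node
    pidsConsumed := int64(10000)   // headroom the test wants the workload to consume

    // Same arithmetic as the diff: leave pidsConsumed PIDs below the limit,
    // so a pod forking ~30000 times reliably triggers PID pressure.
    availablePids := maxPID - runningProcesses
    threshold := availablePids - pidsConsumed
    fmt.Printf("pid.available<%d\n", threshold) // the value written into EvictionHard
}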
@@ -513,6 +522,7 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 			specs[2].pod.Spec.PriorityClassName = highPriorityClassName
 			runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
 		})
+	}
 
 	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", func() {
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
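The brace added in this hunk closes the new for loop, so the entire PriorityClass setup and runEvictionTest scenario is registered twice, once per criStats value. This works because Ginkgo builds its spec tree at construction time: a loop around ginkgo.Context simply registers one context per iteration. A minimal sketch of that pattern (names here are illustrative, not the test's own):

package sketch

import (
    "fmt"

    "github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("pid eviction", func() {
    // The loop body runs while the spec tree is being built, so each
    // iteration registers a separate Context with its captured value.
    for _, criStats := range []bool{true, false} {
        criStats := criStats // per-iteration copy (needed before Go 1.22)
        ginkgo.Context(fmt.Sprintf("with PodAndContainerStatsFromCRI=%v", criStats), func() {
            ginkgo.It("runs the eviction scenario", func() {
                _ = criStats // the real test threads this into the kubelet config
            })
        })
    }
})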