Implement graceful shutdown based on Pod priority
parent d82f606970
commit 545313bdc7
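In short, this change replaces the fixed two-phase shutdown (regular pods first, then critical pods) with an ordered list of pod-priority buckets, each carrying its own shutdown grace period, behind the GracefulNodeShutdownBasedOnPodPriority feature gate; the legacy ShutdownGracePeriod/ShutdownGracePeriodCriticalPods pair is migrated into two such buckets. A minimal sketch of the new configuration value as it might be populated in code (the priority cutoffs and seconds are illustrative, not defaults, and this compiles only in-tree since the kubelet config package is internal):

	package main

	import (
		"fmt"

		kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	)

	func main() {
		// Illustrative buckets: pods below priority 100000 get 30s, pods in
		// [100000, 2000000000) get 60s, and critical pods
		// (scheduling.SystemCriticalPriority == 2000000000) get 120s.
		periods := []kubeletconfig.ShutdownGracePeriodByPodPriority{
			{Priority: 0, ShutdownGracePeriodSeconds: 30},
			{Priority: 100000, ShutdownGracePeriodSeconds: 60},
			{Priority: 2000000000, ShutdownGracePeriodSeconds: 120},
		}
		fmt.Printf("%+v\n", periods)
	}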
@@ -267,6 +267,8 @@ var (
 	"SeccompDefault",
 	"SerializeImagePulls",
 	"ShowHiddenMetricsForVersion",
+	"ShutdownGracePeriodByPodPriority[*].Priority",
+	"ShutdownGracePeriodByPodPriority[*].ShutdownGracePeriodSeconds",
 	"StreamingConnectionIdleTimeout.Duration",
 	"SyncFrequency.Duration",
 	"SystemCgroups",
@@ -867,14 +867,15 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

 	// setup node shutdown manager
 	shutdownManager, shutdownAdmitHandler := nodeshutdown.NewManager(&nodeshutdown.Config{
-		ProbeManager:                    klet.probeManager,
-		Recorder:                        kubeDeps.Recorder,
-		NodeRef:                         nodeRef,
-		GetPodsFunc:                     klet.GetActivePods,
-		KillPodFunc:                     killPodNow(klet.podWorkers, kubeDeps.Recorder),
-		SyncNodeStatusFunc:              klet.syncNodeStatus,
-		ShutdownGracePeriodRequested:    kubeCfg.ShutdownGracePeriod.Duration,
-		ShutdownGracePeriodCriticalPods: kubeCfg.ShutdownGracePeriodCriticalPods.Duration,
+		ProbeManager:                     klet.probeManager,
+		Recorder:                         kubeDeps.Recorder,
+		NodeRef:                          nodeRef,
+		GetPodsFunc:                      klet.GetActivePods,
+		KillPodFunc:                      killPodNow(klet.podWorkers, kubeDeps.Recorder),
+		SyncNodeStatusFunc:               klet.syncNodeStatus,
+		ShutdownGracePeriodRequested:     kubeCfg.ShutdownGracePeriod.Duration,
+		ShutdownGracePeriodCriticalPods:  kubeCfg.ShutdownGracePeriodCriticalPods.Duration,
+		ShutdownGracePeriodByPodPriority: kubeCfg.ShutdownGracePeriodByPodPriority,
 	})
 	klet.shutdownManager = shutdownManager
 	klet.admitHandlers.AddPodAdmitHandler(shutdownAdmitHandler)
@@ -21,6 +21,7 @@ import (

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"
+	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/prober"
@@ -36,15 +37,16 @@ type Manager interface {

 // Config represents Manager configuration
 type Config struct {
-	ProbeManager                    prober.Manager
-	Recorder                        record.EventRecorder
-	NodeRef                         *v1.ObjectReference
-	GetPodsFunc                     eviction.ActivePodsFunc
-	KillPodFunc                     eviction.KillPodFunc
-	SyncNodeStatusFunc              func()
-	ShutdownGracePeriodRequested    time.Duration
-	ShutdownGracePeriodCriticalPods time.Duration
-	Clock                           clock.Clock
+	ProbeManager                     prober.Manager
+	Recorder                         record.EventRecorder
+	NodeRef                          *v1.ObjectReference
+	GetPodsFunc                      eviction.ActivePodsFunc
+	KillPodFunc                      eviction.KillPodFunc
+	SyncNodeStatusFunc               func()
+	ShutdownGracePeriodRequested     time.Duration
+	ShutdownGracePeriodCriticalPods  time.Duration
+	ShutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
+	Clock                            clock.Clock
 }

 // managerStub is a fake node shutdown managerImpl.
@@ -22,6 +22,7 @@ package nodeshutdown

 import (
 	"fmt"
+	"sort"
 	"sync"
 	"time"

@@ -29,13 +30,14 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/apis/scheduling"
 	"k8s.io/kubernetes/pkg/features"
+	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubeletevents "k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
 	"k8s.io/kubernetes/pkg/kubelet/prober"
-	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/utils/clock"
 )

@@ -66,8 +68,7 @@ type managerImpl struct {
 	nodeRef      *v1.ObjectReference
 	probeManager prober.Manager

-	shutdownGracePeriodRequested    time.Duration
-	shutdownGracePeriodCriticalPods time.Duration
+	shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority

 	getPods     eviction.ActivePodsFunc
 	killPodFunc eviction.KillPodFunc
@@ -84,28 +85,46 @@ type managerImpl struct {

 // NewManager returns a new node shutdown manager.
 func NewManager(conf *Config) (Manager, lifecycle.PodAdmitHandler) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdown) ||
-		(conf.ShutdownGracePeriodRequested == 0 && conf.ShutdownGracePeriodCriticalPods == 0) {
+	if !utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdown) {
 		m := managerStub{}
 		return m, m
 	}
+
+	shutdownGracePeriodByPodPriority := conf.ShutdownGracePeriodByPodPriority
+	// Migration from the original configuration
+	if !utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority) ||
+		len(shutdownGracePeriodByPodPriority) == 0 {
+		shutdownGracePeriodByPodPriority = migrateConfig(conf.ShutdownGracePeriodRequested, conf.ShutdownGracePeriodCriticalPods)
+	}
+
+	// Disable if the configuration is empty
+	if len(shutdownGracePeriodByPodPriority) == 0 {
+		m := managerStub{}
+		return m, m
+	}
+
+	// Sort by priority from low to high
+	sort.Slice(shutdownGracePeriodByPodPriority, func(i, j int) bool {
+		return shutdownGracePeriodByPodPriority[i].Priority < shutdownGracePeriodByPodPriority[j].Priority
+	})
+
 	if conf.Clock == nil {
 		conf.Clock = clock.RealClock{}
 	}
 	manager := &managerImpl{
-		probeManager:                    conf.ProbeManager,
-		recorder:                        conf.Recorder,
-		nodeRef:                         conf.NodeRef,
-		getPods:                         conf.GetPodsFunc,
-		killPodFunc:                     conf.KillPodFunc,
-		syncNodeStatus:                  conf.SyncNodeStatusFunc,
-		shutdownGracePeriodRequested:    conf.ShutdownGracePeriodRequested,
-		shutdownGracePeriodCriticalPods: conf.ShutdownGracePeriodCriticalPods,
-		clock:                           conf.Clock,
+		probeManager:                     conf.ProbeManager,
+		recorder:                         conf.Recorder,
+		nodeRef:                          conf.NodeRef,
+		getPods:                          conf.GetPodsFunc,
+		killPodFunc:                      conf.KillPodFunc,
+		syncNodeStatus:                   conf.SyncNodeStatusFunc,
+		shutdownGracePeriodByPodPriority: shutdownGracePeriodByPodPriority,
+		clock:                            conf.Clock,
 	}
 	klog.InfoS("Creating node shutdown manager",
 		"shutdownGracePeriodRequested", conf.ShutdownGracePeriodRequested,
 		"shutdownGracePeriodCriticalPods", conf.ShutdownGracePeriodCriticalPods,
+		"shutdownGracePeriodByPodPriority", shutdownGracePeriodByPodPriority,
 	)
 	return manager, manager
 }
@@ -159,9 +178,9 @@ func (m *managerImpl) start() (chan struct{}, error) {
 		return nil, err
 	}

-	// If the logind's InhibitDelayMaxUSec as configured in (logind.conf) is less than shutdownGracePeriodRequested, attempt to update the value to shutdownGracePeriodRequested.
-	if m.shutdownGracePeriodRequested > currentInhibitDelay {
-		err := m.dbusCon.OverrideInhibitDelay(m.shutdownGracePeriodRequested)
+	// If the logind's InhibitDelayMaxUSec as configured in (logind.conf) is less than periodRequested, attempt to update the value to periodRequested.
+	if periodRequested := m.periodRequested(); periodRequested > currentInhibitDelay {
+		err := m.dbusCon.OverrideInhibitDelay(periodRequested)
 		if err != nil {
 			return nil, fmt.Errorf("unable to override inhibit delay by shutdown manager: %v", err)
 		}
@@ -177,8 +196,8 @@ func (m *managerImpl) start() (chan struct{}, error) {
 			return nil, err
 		}

-		if m.shutdownGracePeriodRequested > updatedInhibitDelay {
-			return nil, fmt.Errorf("node shutdown manager was unable to update logind InhibitDelayMaxSec to %v (ShutdownGracePeriod), current value of InhibitDelayMaxSec (%v) is less than requested ShutdownGracePeriod", m.shutdownGracePeriodRequested, updatedInhibitDelay)
+		if periodRequested > updatedInhibitDelay {
+			return nil, fmt.Errorf("node shutdown manager was unable to update logind InhibitDelayMaxSec to %v (ShutdownGracePeriod), current value of InhibitDelayMaxSec (%v) is less than requested ShutdownGracePeriod", periodRequested, updatedInhibitDelay)
 		}
 	}
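Note: the inhibit delay requested from logind is now the total across all configured buckets (periodRequested, added below), because the shutdown groups are drained sequentially, each waiting up to its own ShutdownGracePeriodSeconds. For example, buckets of 60s and 120s ask logind for a 180s delay.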
@@ -270,54 +289,54 @@ func (m *managerImpl) processShutdownEvent() error {
 	klog.V(1).InfoS("Shutdown manager processing shutdown event")
 	activePods := m.getPods()

-	nonCriticalPodGracePeriod := m.shutdownGracePeriodRequested - m.shutdownGracePeriodCriticalPods
+	groups := groupByPriority(m.shutdownGracePeriodByPodPriority, activePods)
+	for _, group := range groups {
+		// If there are no pods in a particular range,
+		// then do not wait for pods in that priority range.
+		if len(group.Pods) == 0 {
+			continue
+		}

-	var wg sync.WaitGroup
-	wg.Add(len(activePods))
-	for _, pod := range activePods {
-		go func(pod *v1.Pod) {
-			defer wg.Done()
+		var wg sync.WaitGroup
+		wg.Add(len(group.Pods))
+		for _, pod := range group.Pods {
+			go func(pod *v1.Pod, group podShutdownGroup) {
+				defer wg.Done()

-			var gracePeriodOverride int64
-			if kubelettypes.IsCriticalPod(pod) {
-				gracePeriodOverride = int64(m.shutdownGracePeriodCriticalPods.Seconds())
-				m.clock.Sleep(nonCriticalPodGracePeriod)
-			} else {
-				gracePeriodOverride = int64(nonCriticalPodGracePeriod.Seconds())
-			}
+				gracePeriodOverride := group.ShutdownGracePeriodSeconds

-			// Stop probes for the pod
-			m.probeManager.RemovePod(pod)
+				// Stop probes for the pod
+				m.probeManager.RemovePod(pod)

-			// If the pod's spec specifies a termination gracePeriod which is less than the gracePeriodOverride calculated, use the pod spec termination gracePeriod.
-			if pod.Spec.TerminationGracePeriodSeconds != nil && *pod.Spec.TerminationGracePeriodSeconds <= gracePeriodOverride {
-				gracePeriodOverride = *pod.Spec.TerminationGracePeriodSeconds
-			}
+				// If the pod's spec specifies a termination gracePeriod which is less than the gracePeriodOverride calculated, use the pod spec termination gracePeriod.
+				if pod.Spec.TerminationGracePeriodSeconds != nil && *pod.Spec.TerminationGracePeriodSeconds <= gracePeriodOverride {
+					gracePeriodOverride = *pod.Spec.TerminationGracePeriodSeconds
+				}

-			klog.V(1).InfoS("Shutdown manager killing pod with gracePeriod", "pod", klog.KObj(pod), "gracePeriod", gracePeriodOverride)
-			if err := m.killPodFunc(pod, false, &gracePeriodOverride, func(status *v1.PodStatus) {
-				status.Message = nodeShutdownMessage
-				status.Reason = nodeShutdownReason
-			}); err != nil {
-				klog.V(1).InfoS("Shutdown manager failed killing pod", "pod", klog.KObj(pod), "err", err)
-			} else {
-				klog.V(1).InfoS("Shutdown manager finished killing pod", "pod", klog.KObj(pod))
-			}
-		}(pod)
-	}
+				klog.V(1).InfoS("Shutdown manager killing pod with gracePeriod", "pod", klog.KObj(pod), "gracePeriod", gracePeriodOverride)

-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		wg.Wait()
-	}()
+				if err := m.killPodFunc(pod, false, &gracePeriodOverride, func(status *v1.PodStatus) {
+					status.Message = nodeShutdownMessage
+					status.Reason = nodeShutdownReason
+				}); err != nil {
+					klog.V(1).InfoS("Shutdown manager failed killing pod", "pod", klog.KObj(pod), "err", err)
+				} else {
+					klog.V(1).InfoS("Shutdown manager finished killing pod", "pod", klog.KObj(pod))
+				}
+			}(pod, group)
+		}

-	// We want to ensure that inhibitLock is released, so only wait up to the shutdownGracePeriodRequested timeout.
-	select {
-	case <-c:
-		break
-	case <-time.After(m.shutdownGracePeriodRequested):
-		klog.V(1).InfoS("Shutdown manager pod killing time out", "gracePeriod", m.shutdownGracePeriodRequested)
+		c := make(chan struct{})
+		go func() {
+			defer close(c)
+			wg.Wait()
+		}()
+
+		select {
+		case <-c:
+		case <-time.After(time.Duration(group.ShutdownGracePeriodSeconds) * time.Second):
+			klog.V(1).InfoS("Shutdown manager pod killing time out", "gracePeriod", group.ShutdownGracePeriodSeconds, "priority", group.Priority)
+		}
 	}

 	m.dbusCon.ReleaseInhibitLock(m.inhibitLock)
@@ -325,3 +344,78 @@ func (m *managerImpl) processShutdownEvent() error {

 	return nil
 }
+
+func (m *managerImpl) periodRequested() time.Duration {
+	var sum int64
+	for _, period := range m.shutdownGracePeriodByPodPriority {
+		sum += period.ShutdownGracePeriodSeconds
+	}
+	return time.Duration(sum) * time.Second
+}
+
+func migrateConfig(shutdownGracePeriodRequested, shutdownGracePeriodCriticalPods time.Duration) []kubeletconfig.ShutdownGracePeriodByPodPriority {
+	if shutdownGracePeriodRequested == 0 {
+		return nil
+	}
+	defaultPriority := shutdownGracePeriodRequested - shutdownGracePeriodCriticalPods
+	if defaultPriority < 0 {
+		return nil
+	}
+	criticalPriority := shutdownGracePeriodRequested - defaultPriority
+	if criticalPriority < 0 {
+		return nil
+	}
+	return []kubeletconfig.ShutdownGracePeriodByPodPriority{
+		{
+			Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
+			ShutdownGracePeriodSeconds: int64(defaultPriority / time.Second),
+		},
+		{
+			Priority:                   scheduling.SystemCriticalPriority,
+			ShutdownGracePeriodSeconds: int64(criticalPriority / time.Second),
+		},
+	}
+}
+
+func groupByPriority(shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority, pods []*v1.Pod) []podShutdownGroup {
+	groups := make([]podShutdownGroup, 0, len(shutdownGracePeriodByPodPriority))
+	for _, period := range shutdownGracePeriodByPodPriority {
+		groups = append(groups, podShutdownGroup{
+			ShutdownGracePeriodByPodPriority: period,
+		})
+	}
+
+	for _, pod := range pods {
+		var priority int32
+		if pod.Spec.Priority != nil {
+			priority = *pod.Spec.Priority
+		}
+
+		// Find the group index according to the priority.
+		index := sort.Search(len(groups), func(i int) bool {
+			return groups[i].Priority >= priority
+		})
+
+		// 1. Those higher than the highest priority default to the highest priority
+		// 2. Those lower than the lowest priority default to the lowest priority
+		// 3. Those at a boundary default to the lower priority,
+		//    i.e. if the priority of the pod satisfies
+		//    groups[index-1].Priority <= pod priority < groups[index].Priority
+		//    we want to pick the lower one (index-1)
+		if index == len(groups) {
+			index = len(groups) - 1
+		} else if index < 0 {
+			index = 0
+		} else if index > 0 && groups[index].Priority > priority {
+			index--
+		}
+
+		groups[index].Pods = append(groups[index].Pods, pod)
+	}
+	return groups
+}
+
+type podShutdownGroup struct {
+	kubeletconfig.ShutdownGracePeriodByPodPriority
+	Pods []*v1.Pod
+}
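As a worked example of the migration path, migrateConfig(300*time.Second, 120*time.Second) yields two buckets, {Priority: DefaultPriorityWhenNoDefaultClassExists, 180s} and {Priority: SystemCriticalPriority, 120s}, which is exactly what Test_migrateConfig below asserts. To make the bucket selection in groupByPriority concrete, here is a self-contained sketch of the same index arithmetic (bucket and pick are hypothetical names for this illustration, not kubelet code):

	package main

	import (
		"fmt"
		"sort"
	)

	// bucket stands in for kubeletconfig.ShutdownGracePeriodByPodPriority.
	type bucket struct {
		priority int32
		seconds  int64
	}

	// pick chooses the bucket with the highest priority that is <= the pod's
	// priority, clamping pods above the top bucket into the top bucket and
	// pods below the bottom bucket into the bottom one.
	func pick(buckets []bucket, priority int32) int64 {
		i := sort.Search(len(buckets), func(i int) bool {
			return buckets[i].priority >= priority
		})
		if i == len(buckets) {
			i = len(buckets) - 1 // above the highest bucket
		} else if i > 0 && buckets[i].priority > priority {
			i-- // strictly between two buckets: take the lower one
		}
		return buckets[i].seconds
	}

	func main() {
		// Sorted by priority, as NewManager guarantees via sort.Slice.
		buckets := []bucket{{1, 10}, {2, 20}, {3, 30}, {4, 40}}
		for _, p := range []int32{0, 1, 2, 3, 4, 5} {
			fmt.Printf("priority %d -> %ds\n", p, pick(buckets, p))
		}
	}

Running it prints 10, 10, 20, 30, 40, 40 seconds for priorities 0 through 5, matching the "pod priority" case in Test_groupByPriority below.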
@@ -35,6 +35,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/apis/scheduling"
 	pkgfeatures "k8s.io/kubernetes/pkg/features"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
 	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
 	testingclock "k8s.io/utils/clock/testing"
@@ -81,12 +82,7 @@ func (f *fakeDbus) OverrideInhibitDelay(inhibitDelayMax time.Duration) error {
 	return nil
 }

-func makePod(name string, criticalPod bool, terminationGracePeriod *int64) *v1.Pod {
-	var priority int32
-	if criticalPod {
-		priority = scheduling.SystemCriticalPriority
-	}
-
+func makePod(name string, priority int32, terminationGracePeriod *int64) *v1.Pod {
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -104,15 +100,15 @@ func TestManager(t *testing.T) {
 	defer func() {
 		systemDbus = systemDbusTmp
 	}()
-	normalPodNoGracePeriod := makePod("normal-pod-nil-grace-period", false /* criticalPod */, nil /* terminationGracePeriod */)
-	criticalPodNoGracePeriod := makePod("critical-pod-nil-grace-period", true /* criticalPod */, nil /* terminationGracePeriod */)
+	normalPodNoGracePeriod := makePod("normal-pod-nil-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil /* terminationGracePeriod */)
+	criticalPodNoGracePeriod := makePod("critical-pod-nil-grace-period", scheduling.SystemCriticalPriority, nil /* terminationGracePeriod */)

 	shortGracePeriod := int64(2)
-	normalPodGracePeriod := makePod("normal-pod-grace-period", false /* criticalPod */, &shortGracePeriod /* terminationGracePeriod */)
-	criticalPodGracePeriod := makePod("critical-pod-grace-period", true /* criticalPod */, &shortGracePeriod /* terminationGracePeriod */)
+	normalPodGracePeriod := makePod("normal-pod-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &shortGracePeriod /* terminationGracePeriod */)
+	criticalPodGracePeriod := makePod("critical-pod-grace-period", scheduling.SystemCriticalPriority, &shortGracePeriod /* terminationGracePeriod */)

 	longGracePeriod := int64(1000)
-	normalPodLongGracePeriod := makePod("normal-pod-long-grace-period", false /* criticalPod */, &longGracePeriod /* terminationGracePeriod */)
+	normalPodLongGracePeriod := makePod("normal-pod-long-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &longGracePeriod /* terminationGracePeriod */)

 	var tests = []struct {
 		desc string
@@ -256,7 +252,9 @@ func TestManager(t *testing.T) {
 			lock.Unlock()

 			if tc.expectedError != nil {
-				if !strings.Contains(err.Error(), tc.expectedError.Error()) {
+				if err == nil {
+					t.Errorf("unexpected error message. Got: <nil> want %s", tc.expectedError.Error())
+				} else if !strings.Contains(err.Error(), tc.expectedError.Error()) {
 					t.Errorf("unexpected error message. Got: %s want %s", err.Error(), tc.expectedError.Error())
 				}
 			} else {
@@ -266,7 +264,11 @@ func TestManager(t *testing.T) {
 			assert.Equal(t, manager.Admit(nil).Admit, true)

 			// Send fake shutdown event
-			fakeShutdownChan <- true
+			select {
+			case fakeShutdownChan <- true:
+			case <-time.After(1 * time.Second):
+				t.Fatal()
+			}

 			// Wait for all the pods to be killed
 			killedPodsToGracePeriods := map[string]int64{}
@@ -413,3 +415,196 @@ func TestRestart(t *testing.T) {
 		shutdownChanMut.Unlock()
 	}
 }
+
+func Test_migrateConfig(t *testing.T) {
+	type shutdownConfig struct {
+		shutdownGracePeriodRequested    time.Duration
+		shutdownGracePeriodCriticalPods time.Duration
+	}
+	tests := []struct {
+		name string
+		args shutdownConfig
+		want []kubeletconfig.ShutdownGracePeriodByPodPriority
+	}{
+		{
+			name: "both shutdownGracePeriodRequested and shutdownGracePeriodCriticalPods",
+			args: shutdownConfig{
+				shutdownGracePeriodRequested:    300 * time.Second,
+				shutdownGracePeriodCriticalPods: 120 * time.Second,
+			},
+			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
+				{
+					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
+					ShutdownGracePeriodSeconds: 180,
+				},
+				{
+					Priority:                   scheduling.SystemCriticalPriority,
+					ShutdownGracePeriodSeconds: 120,
+				},
+			},
+		},
+		{
+			name: "only shutdownGracePeriodRequested",
+			args: shutdownConfig{
+				shutdownGracePeriodRequested:    100 * time.Second,
+				shutdownGracePeriodCriticalPods: 0 * time.Second,
+			},
+			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
+				{
+					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
+					ShutdownGracePeriodSeconds: 100,
+				},
+				{
+					Priority:                   scheduling.SystemCriticalPriority,
+					ShutdownGracePeriodSeconds: 0,
+				},
+			},
+		},
+		{
+			name: "empty configuration",
+			args: shutdownConfig{
+				shutdownGracePeriodRequested:    0 * time.Second,
+				shutdownGracePeriodCriticalPods: 0 * time.Second,
+			},
+			want: nil,
+		},
+		{
+			name: "wrong configuration",
+			args: shutdownConfig{
+				shutdownGracePeriodRequested:    1 * time.Second,
+				shutdownGracePeriodCriticalPods: 100 * time.Second,
+			},
+			want: nil,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := migrateConfig(tt.args.shutdownGracePeriodRequested, tt.args.shutdownGracePeriodCriticalPods); !assert.Equal(t, tt.want, got) {
+				t.Errorf("migrateConfig() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_groupByPriority(t *testing.T) {
+	type args struct {
+		shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
+		pods                             []*v1.Pod
+	}
+	tests := []struct {
+		name string
+		args args
+		want []podShutdownGroup
+	}{
+		{
+			name: "migrate config",
+			args: args{
+				shutdownGracePeriodByPodPriority: migrateConfig(300*time.Second /* shutdownGracePeriodRequested */, 120*time.Second /* shutdownGracePeriodCriticalPods */),
+				pods: []*v1.Pod{
+					makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
+					makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
+					makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
+				},
+			},
+			want: []podShutdownGroup{
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
+						ShutdownGracePeriodSeconds: 180,
+					},
+					Pods: []*v1.Pod{
+						makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
+						makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
+					},
+				},
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   scheduling.SystemCriticalPriority,
+						ShutdownGracePeriodSeconds: 120,
+					},
+					Pods: []*v1.Pod{
+						makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
+					},
+				},
+			},
+		},
+		{
+			name: "pod priority",
+			args: args{
+				shutdownGracePeriodByPodPriority: []kubeletconfig.ShutdownGracePeriodByPodPriority{
+					{
+						Priority:                   1,
+						ShutdownGracePeriodSeconds: 10,
+					},
+					{
+						Priority:                   2,
+						ShutdownGracePeriodSeconds: 20,
+					},
+					{
+						Priority:                   3,
+						ShutdownGracePeriodSeconds: 30,
+					},
+					{
+						Priority:                   4,
+						ShutdownGracePeriodSeconds: 40,
+					},
+				},
+				pods: []*v1.Pod{
+					makePod("pod-0", 0, nil),
+					makePod("pod-1", 1, nil),
+					makePod("pod-2", 2, nil),
+					makePod("pod-3", 3, nil),
+					makePod("pod-4", 4, nil),
+					makePod("pod-5", 5, nil),
+				},
+			},
+			want: []podShutdownGroup{
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   1,
+						ShutdownGracePeriodSeconds: 10,
+					},
+					Pods: []*v1.Pod{
+						makePod("pod-0", 0, nil),
+						makePod("pod-1", 1, nil),
+					},
+				},
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   2,
+						ShutdownGracePeriodSeconds: 20,
+					},
+					Pods: []*v1.Pod{
+						makePod("pod-2", 2, nil),
+					},
+				},
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   3,
+						ShutdownGracePeriodSeconds: 30,
+					},
+					Pods: []*v1.Pod{
+						makePod("pod-3", 3, nil),
+					},
+				},
+				{
+					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
+						Priority:                   4,
+						ShutdownGracePeriodSeconds: 40,
+					},
+					Pods: []*v1.Pod{
+						makePod("pod-4", 4, nil),
+						makePod("pod-5", 5, nil),
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := groupByPriority(tt.args.shutdownGracePeriodByPodPriority, tt.args.pods); !assert.Equal(t, tt.want, got) {
+				t.Errorf("groupByPriority() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}