Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 20:53:33 +00:00
Merge pull request #64954 from k82cn/k8s_61312_2
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

Added unschedulable and network-unavailable toleration.

Signed-off-by: Da K. Ma <klaus1982.cn@gmail.com>

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
part of #61312
fixes: https://github.com/kubernetes/kubernetes/issues/67606

**Release note**:
```release-note
If `TaintNodesByCondition` is enabled, add `node.kubernetes.io/unschedulable` and `node.kubernetes.io/network-unavailable` automatically to DaemonSet pods.
```
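Per the release note and the util change in the diff below, DaemonSet pods now carry an `Exists` toleration for the `node.kubernetes.io/unschedulable:NoSchedule` taint, and host-network DaemonSet pods additionally tolerate `node.kubernetes.io/network-unavailable:NoSchedule`. A minimal illustrative sketch of the resulting tolerations (not code from this commit; the helper name here is made up for the example):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// conditionTolerations sketches the tolerations the DaemonSet controller
// ensures on DaemonSet pods after this change: the unschedulable toleration
// is always added, the network-unavailable one only for host-network pods.
func conditionTolerations(hostNetwork bool) []v1.Toleration {
	tolerations := []v1.Toleration{{
		Key:      "node.kubernetes.io/unschedulable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}}
	if hostNetwork {
		tolerations = append(tolerations, v1.Toleration{
			Key:      "node.kubernetes.io/network-unavailable",
			Operator: v1.TolerationOpExists,
			Effect:   v1.TaintEffectNoSchedule,
		})
	}
	return tolerations
}

func main() {
	for _, t := range conditionTolerations(true) {
		fmt.Printf("%s %s %s\n", t.Key, t.Operator, t.Effect)
	}
}
```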
This commit is contained in:
commit 323e1375b3
```diff
@@ -16,7 +16,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/controller/daemon",
     deps = [
         "//pkg/api/v1/pod:go_default_library",
-        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/daemon/util:go_default_library",
         "//pkg/features:go_default_library",
```
```diff
@@ -51,7 +51,6 @@ import (
     "k8s.io/client-go/util/integer"
     "k8s.io/client-go/util/workqueue"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon/util"
     "k8s.io/kubernetes/pkg/features"
@@ -1012,7 +1011,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
     if err != nil {
         generation = nil
     }
-    template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
+    template := util.CreatePodTemplate(ds.Namespace, ds.Spec.Template, generation, hash)
     // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
     // and double with each successful iteration in a kind of "slow start".
     // This handles attempts to start large numbers of pods that would
@@ -1039,7 +1038,6 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
                 // should be no conflicting node affinity with the target node.
                 podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
                     podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
-                podTemplate.Spec.Tolerations = util.AppendNoScheduleTolerationIfNotExist(podTemplate.Spec.Tolerations)

                 err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
                     ds, metav1.NewControllerRef(ds, controllerKind))
@@ -1289,49 +1287,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 }

 func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
-    // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
-    // Add infinite toleration for taint notReady:NoExecute here
-    // to survive taint-based eviction enforced by NodeController
-    // when node turns not ready.
-    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      algorithm.TaintNodeNotReady,
-        Operator: v1.TolerationOpExists,
-        Effect:   v1.TaintEffectNoExecute,
-    })
-
-    // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
-    // Add infinite toleration for taint unreachable:NoExecute here
-    // to survive taint-based eviction enforced by NodeController
-    // when node turns unreachable.
-    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      algorithm.TaintNodeUnreachable,
-        Operator: v1.TolerationOpExists,
-        Effect:   v1.TaintEffectNoExecute,
-    })
-
-    // According to TaintNodesByCondition, all DaemonSet pods should tolerate
-    // MemoryPressure and DisPressure taints, and the critical pods should tolerate
-    // OutOfDisk taint additional.
-    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      algorithm.TaintNodeDiskPressure,
-        Operator: v1.TolerationOpExists,
-        Effect:   v1.TaintEffectNoSchedule,
-    })
-
-    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-        Key:      algorithm.TaintNodeMemoryPressure,
-        Operator: v1.TolerationOpExists,
-        Effect:   v1.TaintEffectNoSchedule,
-    })
-
-    // TODO(#48843) OutOfDisk taints will be removed in 1.10
-    if kubelettypes.IsCriticalPod(newPod) {
-        v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
-            Key:      algorithm.TaintNodeOutOfDisk,
-            Operator: v1.TolerationOpExists,
-            Effect:   v1.TaintEffectNoSchedule,
-        })
-    }
+    util.AddOrUpdateDaemonPodTolerations(&newPod.Spec, kubelettypes.IsCriticalPod(newPod))

     objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
     if err != nil {
```
```diff
@@ -437,6 +437,12 @@ func markPodReady(pod *v1.Pod) {
     podutil.UpdatePodCondition(&pod.Status, &condition)
 }

+func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
+    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
+        t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
+    }
+}
+
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
     for _, strategy := range updateStrategies() {
@@ -460,12 +466,9 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
     enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
     // Rollback feature gate.
     defer func() {
-        if !enabled {
-            utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
-        }
+        setFeatureGate(t, features.ScheduleDaemonSetPods, enabled)
     }()
-
-    utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")
+    setFeatureGate(t, features.ScheduleDaemonSetPods, true)

     nodeNum := 5

@@ -1576,6 +1579,11 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
 // DaemonSet should launch a critical pod even when the node with OutOfDisk taints.
 // TODO(#48843) OutOfDisk taints will be removed in 1.10
 func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
+    enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
+    defer func() {
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
+    }()
+
     for _, strategy := range updateStrategies() {
         ds := newDaemonSet("critical")
         ds.Spec.UpdateStrategy = *strategy
@@ -1593,25 +1601,24 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
         // NOTE: Whether or not TaintNodesByCondition is enabled, it'll add toleration to DaemonSet pods.

         // Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
-        utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
         manager.dsStore.Add(ds)
         syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)

         // With enabling critical pod annotation feature gate, we will create critical pod
-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
-        utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
         manager.dsStore.Add(ds)
         syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
-
-        // Rollback feature gate to false.
-        utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
     }
 }

 // DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure taints.
 func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
+    enabled := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
+    defer func() {
+        setFeatureGate(t, features.TaintNodesByCondition, enabled)
+    }()
+
     for _, strategy := range updateStrategies() {
         ds := newDaemonSet("critical")
         ds.Spec.UpdateStrategy = *strategy
@@ -1633,17 +1640,19 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
         manager.nodeStore.Add(node)

         // Enabling critical pod and taint nodes by condition feature gate should create critical pod
-        utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+        setFeatureGate(t, features.TaintNodesByCondition, true)
         manager.dsStore.Add(ds)
         syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
-
-        // Rollback feature gate to false.
-        utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
     }
 }

 // DaemonSet should launch a critical pod even when the node has insufficient free resource.
 func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
+    enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
+    defer func() {
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
+    }()
+
     for _, strategy := range updateStrategies() {
         podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
         ds := newDaemonSet("critical")
@@ -1663,7 +1672,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
         })

         // Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
         manager.dsStore.Add(ds)
         switch strategy.Type {
         case apps.OnDeleteDaemonSetStrategyType:
@@ -1675,7 +1684,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
         }

         // Enabling critical pod annotation feature gate should create critical pod
-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
         switch strategy.Type {
         case apps.OnDeleteDaemonSetStrategyType:
             syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
@@ -1689,6 +1698,11 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {

 // DaemonSets should NOT launch a critical pod when there are port conflicts.
 func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
+    enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
+    defer func() {
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
+    }()
+
     for _, strategy := range updateStrategies() {
         podSpec := v1.PodSpec{
             NodeName: "port-conflict",
@@ -1708,7 +1722,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
             Spec: podSpec,
         })

-        utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
+        setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
         ds := newDaemonSet("critical")
         ds.Spec.UpdateStrategy = *strategy
         ds.Spec.Template.Spec = podSpec
```
```diff
@@ -19,7 +19,6 @@ go_library(
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
```
```diff
@@ -23,7 +23,6 @@ import (
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     extensions "k8s.io/api/extensions/v1beta1"
-    apiequality "k8s.io/apimachinery/pkg/api/equality"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -49,16 +48,13 @@ func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
     return &generation, nil
 }

-// CreatePodTemplate returns copy of provided template with additional
-// label which contains templateGeneration (for backward compatibility),
-// hash of provided template and sets default daemon tolerations.
-func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
-    newTemplate := *template.DeepCopy()
+// AddOrUpdateDaemonPodTolerations apply necessary tolerations to DeamonSet Pods, e.g. node.kubernetes.io/not-ready:NoExecute.
+func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
     // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
     // Add infinite toleration for taint notReady:NoExecute here
     // to survive taint-based eviction enforced by NodeController
     // when node turns not ready.
-    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
+    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
         Key:      algorithm.TaintNodeNotReady,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
@@ -68,36 +64,67 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash stri
     // Add infinite toleration for taint unreachable:NoExecute here
     // to survive taint-based eviction enforced by NodeController
     // when node turns unreachable.
-    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
+    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
         Key:      algorithm.TaintNodeUnreachable,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoExecute,
     })

     // According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
-    // MemoryPressure and DisPressure taints, and the critical pods should tolerate
-    // OutOfDisk taint.
-    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
+    // MemoryPressure, DisPressure, Unschedulable and NetworkUnavailable taints,
+    // and the critical pods should tolerate OutOfDisk taint.
+    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
         Key:      algorithm.TaintNodeDiskPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })

-    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
+    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
         Key:      algorithm.TaintNodeMemoryPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     })

+    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
+        Key:      algorithm.TaintNodeUnschedulable,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    })
+
+    if spec.HostNetwork {
+        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
+            Key:      algorithm.TaintNodeNetworkUnavailable,
+            Operator: v1.TolerationOpExists,
+            Effect:   v1.TaintEffectNoSchedule,
+        })
+    }
+
     // TODO(#48843) OutOfDisk taints will be removed in 1.10
-    if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
-        kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
-        v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
+    if isCritical {
+        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
             Key:      algorithm.TaintNodeOutOfDisk,
             Operator: v1.TolerationOpExists,
             Effect:   v1.TaintEffectNoExecute,
         })
+        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
+            Key:      algorithm.TaintNodeOutOfDisk,
+            Operator: v1.TolerationOpExists,
+            Effect:   v1.TaintEffectNoSchedule,
+        })
     }
+}
+
+// CreatePodTemplate returns copy of provided template with additional
+// label which contains templateGeneration (for backward compatibility),
+// hash of provided template and sets default daemon tolerations.
+func CreatePodTemplate(ns string, template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
+    newTemplate := *template.DeepCopy()
+
+    // TODO(k82cn): when removing CritialPod feature, also remove 'ns' parameter.
+    isCritical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
+        kubelettypes.IsCritical(ns, newTemplate.Annotations)
+
+    AddOrUpdateDaemonPodTolerations(&newTemplate.Spec, isCritical)

     if newTemplate.ObjectMeta.Labels == nil {
         newTemplate.ObjectMeta.Labels = make(map[string]string)
@@ -185,31 +212,6 @@ func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename str
     return affinity
 }

-// AppendNoScheduleTolerationIfNotExist appends unschedulable toleration to `.spec` if not exist; otherwise,
-// no changes to `.spec.tolerations`.
-func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
-    unschedulableToleration := v1.Toleration{
-        Key:      algorithm.TaintNodeUnschedulable,
-        Operator: v1.TolerationOpExists,
-        Effect:   v1.TaintEffectNoSchedule,
-    }
-
-    unschedulableTaintExist := false
-
-    for _, t := range tolerations {
-        if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
-            unschedulableTaintExist = true
-            break
-        }
-    }
-
-    if !unschedulableTaintExist {
-        tolerations = append(tolerations, unschedulableToleration)
-    }
-
-    return tolerations
-}
-
 // GetTargetNodeName get the target node name of DaemonSet pods. If `.spec.NodeName` is not empty (nil),
 // return `.spec.NodeName`; otherwise, retrieve node name of pending pods from NodeAffinity. Return error
 // if failed to retrieve node name from `.spec.NodeName` and NodeAffinity.
```
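For orientation, a minimal usage sketch of the `AddOrUpdateDaemonPodTolerations` helper introduced above (not part of this commit; it assumes the `k8s.io/kubernetes/pkg/controller/daemon/util` import path shown in the controller's import block): the helper stamps the default daemon tolerations onto a pod spec, including the new unschedulable toleration and, for host-network specs, the network-unavailable one.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/daemon/util"
)

func main() {
	// A host-network pod spec, as produced by a DaemonSet template with HostNetwork set.
	spec := &v1.PodSpec{HostNetwork: true}

	// isCritical=false: the extra OutOfDisk tolerations reserved for critical pods are skipped.
	util.AddOrUpdateDaemonPodTolerations(spec, false)

	for _, toleration := range spec.Tolerations {
		fmt.Printf("%s:%s\n", toleration.Key, toleration.Effect)
	}
}
```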
```diff
@@ -154,7 +154,7 @@ func TestCreatePodTemplate(t *testing.T) {
     }
     for _, test := range tests {
         podTemplateSpec := v1.PodTemplateSpec{}
-        newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
+        newPodTemplate := CreatePodTemplate("", podTemplateSpec, test.templateGeneration, test.hash)
         val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
         if !exists || val != fmt.Sprint(*test.templateGeneration) {
             t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
```
```diff
@@ -94,6 +94,9 @@ func setupScheduler(
         return
     }

+    // Enable Features.
+    algorithmprovider.ApplyFeatureGates()
+
     schedulerConfigFactory := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
         SchedulerName: v1.DefaultSchedulerName,
         Client:        cs,
@@ -297,7 +300,8 @@ func validateDaemonSetPodsAndMarkReady(
     podClient corev1typed.PodInterface,
     podInformer cache.SharedIndexInformer,
     numberPods int,
-    t *testing.T) {
+    t *testing.T,
+) {
     if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
         objects := podInformer.GetIndexer().List()
         if len(objects) != numberPods {
@@ -484,11 +488,15 @@ func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
         func() {
             enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
             defer func() {
-                utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
+                if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
+                    t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
+                }
             }()

             for _, f := range []bool{true, false} {
-                utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+                if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
+                    t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
+                }
                 t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
             }
         }()
@@ -739,11 +747,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
     enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
     defer func() {
-        utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-            features.ScheduleDaemonSetPods, enabled))
+        if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
+            features.ScheduleDaemonSetPods, enabled)); err != nil {
+            t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
+        }
     }()

-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))
+    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
+        t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
+    }

     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
@@ -980,3 +992,87 @@ func TestTaintedNode(t *testing.T) {
         })
     })
 }
+
+// TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
+// to the Unschedulable nodes when TaintNodesByCondition are enabled.
+func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
+    enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
+    defer func() {
+        if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
+            features.TaintNodesByCondition, enabledTaint)); err != nil {
+            t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
+        }
+    }()
+    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
+        t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
+    }
+
+    forEachFeatureGate(t, func(t *testing.T) {
+        forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
+            server, closeFn, dc, informers, clientset := setup(t)
+            defer closeFn()
+            ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t)
+            defer framework.DeleteTestingNamespace(ns, server, t)
+
+            dsClient := clientset.AppsV1().DaemonSets(ns.Name)
+            podClient := clientset.CoreV1().Pods(ns.Name)
+            nodeClient := clientset.CoreV1().Nodes()
+            podInformer := informers.Core().V1().Pods().Informer()
+
+            stopCh := make(chan struct{})
+            defer close(stopCh)
+
+            informers.Start(stopCh)
+            go dc.Run(5, stopCh)
+
+            // Start Scheduler
+            setupScheduler(t, clientset, informers, stopCh)
+
+            ds := newDaemonSet("foo", ns.Name)
+            ds.Spec.UpdateStrategy = *strategy
+            ds.Spec.Template.Spec.HostNetwork = true
+            _, err := dsClient.Create(ds)
+            if err != nil {
+                t.Fatalf("Failed to create DaemonSet: %v", err)
+            }
+
+            defer cleanupDaemonSets(t, clientset, ds)
+
+            // Creates unschedulable node.
+            node := newNode("unschedulable-node", nil)
+            node.Spec.Unschedulable = true
+            node.Spec.Taints = []v1.Taint{
+                {
+                    Key:    algorithm.TaintNodeUnschedulable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            }
+
+            _, err = nodeClient.Create(node)
+            if err != nil {
+                t.Fatalf("Failed to create node: %v", err)
+            }
+
+            // Creates network-unavailable node.
+            nodeNU := newNode("network-unavailable-node", nil)
+            nodeNU.Status.Conditions = []v1.NodeCondition{
+                {Type: v1.NodeReady, Status: v1.ConditionFalse},
+                {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
+            }
+            nodeNU.Spec.Taints = []v1.Taint{
+                {
+                    Key:    algorithm.TaintNodeNetworkUnavailable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            }
+
+            _, err = nodeClient.Create(nodeNU)
+            if err != nil {
+                t.Fatalf("Failed to create node: %v", err)
+            }
+
+            validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
+            validateDaemonSetStatus(dsClient, ds.Name, 2, t)
+        })
+    })
+}
```