Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #63471 from ceshihao/taint_behavior_consistent
Automatic merge from submit-queue (batch tested with PRs 65032, 63471, 64104, 64672, 64427). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Make taint behavior consistent for NoSchedule

**What this PR does / why we need it**: Make taint behavior consistent: if `TaintNodesByCondition` is enabled, taint the node with `NotReady:NoSchedule`.

**Which issue(s) this PR fixes**: Fixes #63420

**Special notes for your reviewer**:

**Release note**:

```release-note
None
```
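Note for readers: before this change the node lifecycle controller only tainted a node when a condition's status was `True`, so `Ready` (which signals trouble when `False`) never produced a `NoSchedule` taint. The diff below replaces the plain condition-to-taint-key map with a table that also records the status at which each condition should taint. Here is a minimal standalone sketch of the resulting behavior; the types and taint-key strings are simplified stand-ins for the `k8s.io/api/core/v1` types and `algorithm.TaintNode*` constants, not the controller's actual code:

```go
package main

import "fmt"

// Simplified stand-ins for the k8s.io/api/core/v1 types used by the controller.
type NodeCondition struct{ Type, Status string }
type Taint struct{ Key, Effect string }

// Mirrors the idea of the patch's nodeConditionToTaintKeyStatusMap: each
// condition type records the status at which the node should be tainted
// NoSchedule. Ready taints on "False"; pressure conditions taint on "True".
var conditionToTaint = map[string]struct {
	taintKey         string
	noScheduleStatus string
}{
	"Ready":          {"node.kubernetes.io/not-ready", "False"},
	"MemoryPressure": {"node.kubernetes.io/memory-pressure", "True"},
	"DiskPressure":   {"node.kubernetes.io/disk-pressure", "True"},
}

// noScheduleTaints maps a node's conditions to the NoSchedule taints they imply.
func noScheduleTaints(conditions []NodeCondition) []Taint {
	var taints []Taint
	for _, c := range conditions {
		if t, found := conditionToTaint[c.Type]; found && c.Status == t.noScheduleStatus {
			taints = append(taints, Taint{Key: t.taintKey, Effect: "NoSchedule"})
		}
	}
	return taints
}

func main() {
	// Before this PR, Ready=False produced no taint; with the status-aware
	// table it produces a not-ready:NoSchedule taint, consistent with the rest.
	fmt.Println(noScheduleTaints([]NodeCondition{{Type: "Ready", Status: "False"}}))
}
```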
This commit is contained in: 06ea14a5d6
```diff
@@ -80,15 +80,40 @@ var (
 		Effect: v1.TaintEffectNoExecute,
 	}
 
-	nodeConditionToTaintKeyMap = map[v1.NodeConditionType]string{
-		v1.NodeMemoryPressure:     algorithm.TaintNodeMemoryPressure,
-		v1.NodeOutOfDisk:          algorithm.TaintNodeOutOfDisk,
-		v1.NodeDiskPressure:       algorithm.TaintNodeDiskPressure,
-		v1.NodeNetworkUnavailable: algorithm.TaintNodeNetworkUnavailable,
-		v1.NodePIDPressure:        algorithm.TaintNodePIDPressure,
-	}
+	nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]struct {
+		taintKey string
+		// noScheduleStatus is the condition under which the node should be tainted as not schedulable for this
+		// NodeConditionType
+		noScheduleStatus v1.ConditionStatus
+	}{
+		v1.NodeReady: {
+			taintKey:         algorithm.TaintNodeNotReady,
+			noScheduleStatus: v1.ConditionFalse,
+		},
+		v1.NodeMemoryPressure: {
+			taintKey:         algorithm.TaintNodeMemoryPressure,
+			noScheduleStatus: v1.ConditionTrue,
+		},
+		v1.NodeOutOfDisk: {
+			taintKey:         algorithm.TaintNodeOutOfDisk,
+			noScheduleStatus: v1.ConditionTrue,
+		},
+		v1.NodeDiskPressure: {
+			taintKey:         algorithm.TaintNodeDiskPressure,
+			noScheduleStatus: v1.ConditionTrue,
+		},
+		v1.NodeNetworkUnavailable: {
+			taintKey:         algorithm.TaintNodeNetworkUnavailable,
+			noScheduleStatus: v1.ConditionTrue,
+		},
+		v1.NodePIDPressure: {
+			taintKey:         algorithm.TaintNodePIDPressure,
+			noScheduleStatus: v1.ConditionTrue,
+		},
+	}
 
 	taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{
+		algorithm.TaintNodeNotReady:           v1.NodeReady,
 		algorithm.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
 		algorithm.TaintNodeMemoryPressure:     v1.NodeMemoryPressure,
 		algorithm.TaintNodeOutOfDisk:          v1.NodeOutOfDisk,
```
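Two details in the new table do the real work: `v1.NodeReady` is tainted when its status is `ConditionFalse`, while every pressure/availability condition is tainted when its status is `ConditionTrue`; and `taintKeyToNodeConditionMap` gains a `TaintNodeNotReady` entry so the reverse lookup (taint back to condition) covers the new taint too.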
```diff
@@ -430,12 +455,12 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error {
 
 func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error {
 	// Map node's condition to Taints.
-	taints := []v1.Taint{}
+	var taints []v1.Taint
 	for _, condition := range node.Status.Conditions {
-		if _, found := nodeConditionToTaintKeyMap[condition.Type]; found {
-			if condition.Status == v1.ConditionTrue {
+		if taint, found := nodeConditionToTaintKeyStatusMap[condition.Type]; found {
+			if condition.Status == taint.noScheduleStatus {
 				taints = append(taints, v1.Taint{
-					Key:    nodeConditionToTaintKeyMap[condition.Type],
+					Key:    taint.taintKey,
 					Effect: v1.TaintEffectNoSchedule,
 				})
 			}
```
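A small cleanup rides along in this hunk: `taints := []v1.Taint{}` becomes `var taints []v1.Taint`. A nil slice is the idiomatic zero value in Go and behaves the same under `append`; a quick standalone illustration (plain Go, nothing Kubernetes-specific):

```go
package main

import "fmt"

func main() {
	var a []int  // nil slice: the zero value, no backing storage yet
	b := []int{} // empty but non-nil slice

	fmt.Println(a == nil, b == nil) // true false
	fmt.Println(len(a), len(b))     // 0 0

	// append grows both identically, so the two forms are interchangeable here.
	a = append(a, 1)
	b = append(b, 1)
	fmt.Println(a, b) // [1] [1]
}
```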
```diff
@@ -2163,6 +2163,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
 		Key:    algorithm.TaintNodeNetworkUnavailable,
 		Effect: v1.TaintEffectNoSchedule,
 	}
+	notReadyTaint := &v1.Taint{
+		Key:    algorithm.TaintNodeNotReady,
+		Effect: v1.TaintEffectNoSchedule,
+	}
 
 	tests := []struct {
 		Name string
```
```diff
@@ -2271,6 +2275,30 @@ func TestTaintsNodeByCondition(t *testing.T) {
 			},
 			ExpectedTaints: []*v1.Taint{networkUnavailableTaint},
 		},
+		{
+			Name: "Ready is false",
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:              "node0",
+					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+					Labels: map[string]string{
+						kubeletapis.LabelZoneRegion:        "region1",
+						kubeletapis.LabelZoneFailureDomain: "zone1",
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:               v1.NodeReady,
+							Status:             v1.ConditionFalse,
+							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+						},
+					},
+				},
+			},
+			ExpectedTaints: []*v1.Taint{notReadyTaint},
+		},
 	}
 
 	for _, test := range tests {
```
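The added "Ready is false" case pins the new behavior in the unit test: a node whose `Ready` condition is `ConditionFalse` must come out with exactly the `notReadyTaint` declared above.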
```diff
@@ -229,7 +229,7 @@ func TestTaintNodeByCondition(t *testing.T) {
 		}
 	}
 
-	// Case 4: Schedule Pod with NetworkUnavailable toleration.
+	// Case 4: Schedule Pod with NetworkUnavailable and NotReady toleration.
 	networkDaemonPod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "network-daemon-pod",
```
```diff
@@ -248,6 +248,11 @@ func TestTaintNodeByCondition(t *testing.T) {
 					Operator: v1.TolerationOpExists,
 					Effect:   v1.TaintEffectNoSchedule,
 				},
+				{
+					Key:      algorithm.TaintNodeNotReady,
+					Operator: v1.TolerationOpExists,
+					Effect:   v1.TaintEffectNoSchedule,
+				},
 			},
 		},
 	}
```
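The toleration added here is the practical consequence for workloads: once `TaintNodesByCondition` taints not-ready nodes with `node.kubernetes.io/not-ready:NoSchedule` (the value of `algorithm.TaintNodeNotReady`), daemons that must schedule onto such nodes, like the network daemon in this integration test, need to tolerate that taint in addition to the network-unavailable one.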