Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)
Merge pull request #67337 from linyouchong/pr-0813-issue67225
Automatic merge from submit-queue (batch tested with PRs 67493, 67617, 67582, 67337). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix bug: DaemonSet didn't create a pod after the node had enough resources

**What this PR does / why we need it**:
Fix a bug where the DaemonSet controller didn't create a pod after a node gained enough resources.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #67225

**Special notes for your reviewer**:
NONE

**Release note**:
```release-note
DaemonSet: fix a bug where the controller didn't create a pod after a node gained enough resources
```
This commit is contained in: commit 5d8a79f2e1
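In short: `updateNode` used to re-enqueue DaemonSets only when a node's labels, taints, or conditions changed, so an update that merely raised the node's allocatable resources was silently dropped and a pending DaemonSet pod was never created. The fix normalizes the fields that are allowed to differ (`ResourceVersion`, `Status.Conditions`) and then compares the whole node semantically, so any other change, including allocatable, triggers a sync. A minimal, self-contained sketch of that distinction; the node values and the `main` package are illustrative, not part of the PR:

```go
// Sketch (not part of the PR): contrast the old label/taint/condition check
// with the new normalize-then-DeepEqual check on a node whose only change
// is a larger allocatable resource list.
package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	oldNode := v1.Node{}
	curNode := oldNode

	// Simulate an update that only raises the node's allocatable resources.
	curNode.Status.Allocatable = v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("200m"),
		v1.ResourceMemory: resource.MustParse("200M"),
	}

	// Old behavior: labels and taints (and conditions) are unchanged, so the
	// update was treated as a no-op and the DaemonSet was never re-enqueued.
	oldCheckIgnores := reflect.DeepEqual(oldNode.Labels, curNode.Labels) &&
		reflect.DeepEqual(oldNode.Spec.Taints, curNode.Spec.Taints)
	fmt.Println("old check ignores update:", oldCheckIgnores) // true

	// New behavior: normalize the fields that may legitimately differ, then
	// compare the whole object. The Allocatable difference makes this false,
	// so the update is processed and the pending pod can be created.
	oldNode.ResourceVersion = curNode.ResourceVersion
	oldNode.Status.Conditions = curNode.Status.Conditions
	fmt.Println("new check ignores update:", apiequality.Semantic.DeepEqual(oldNode, curNode)) // false
}
```

The semantic comparison is deliberately conservative: anything not explicitly normalized away counts as a real change.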
pkg/controller/daemon/BUILD

@@ -28,6 +28,7 @@ go_library(
         "//pkg/util/metrics:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
pkg/controller/daemon/daemon_controller.go

@@ -27,6 +27,7 @@ import (
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -735,14 +736,19 @@ func nodeInSameCondition(old []v1.NodeCondition, cur []v1.NodeCondition) bool {
 	return len(c1map) == 0
 }
 
+func shouldIgnoreNodeUpdate(oldNode, curNode v1.Node) bool {
+	if !nodeInSameCondition(oldNode.Status.Conditions, curNode.Status.Conditions) {
+		return false
+	}
+	oldNode.ResourceVersion = curNode.ResourceVersion
+	oldNode.Status.Conditions = curNode.Status.Conditions
+	return apiequality.Semantic.DeepEqual(oldNode, curNode)
+}
+
 func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 	oldNode := old.(*v1.Node)
 	curNode := cur.(*v1.Node)
-	if reflect.DeepEqual(oldNode.Labels, curNode.Labels) &&
-		reflect.DeepEqual(oldNode.Spec.Taints, curNode.Spec.Taints) &&
-		nodeInSameCondition(oldNode.Status.Conditions, curNode.Status.Conditions) {
-		// If node labels, taints and condition didn't change, we can ignore this update.
+
+	if shouldIgnoreNodeUpdate(*oldNode, *curNode) {
 		return
 	}
 
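A detail worth noting: `shouldIgnoreNodeUpdate` receives both nodes by value (the call site dereferences the cache pointers), so overwriting `ResourceVersion` and `Status.Conditions` before the comparison mutates local copies only, never the shared informer cache. A hedged sketch of a focused unit test for the new helper, assuming it sits in the same `daemon` package as the function; this test is not part of the PR:

```go
// Sketch (not part of the PR): exercise shouldIgnoreNodeUpdate directly.
// Assumes this file lives in the same `daemon` package as the helper.
package daemon

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func TestShouldIgnoreNodeUpdate(t *testing.T) {
	base := v1.Node{}

	// A bare ResourceVersion bump carries no real change and is ignored.
	bumped := base
	bumped.ResourceVersion = "2"
	if !shouldIgnoreNodeUpdate(base, bumped) {
		t.Errorf("expected pure ResourceVersion bump to be ignored")
	}

	// A change in allocatable resources must be processed; ignoring it is
	// exactly the bug reported in issue #67225.
	grown := base
	grown.Status.Allocatable = v1.ResourceList{
		v1.ResourceMemory: resource.MustParse("200M"),
	}
	if shouldIgnoreNodeUpdate(base, grown) {
		t.Errorf("expected allocatable change not to be ignored")
	}
}
```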
pkg/controller/daemon/daemon_controller_test.go

@@ -2047,11 +2047,12 @@ func TestUpdateNode(t *testing.T) {
 	var enqueued bool
 
 	cases := []struct {
-		test          string
-		newNode       *v1.Node
-		oldNode       *v1.Node
-		ds            *apps.DaemonSet
-		shouldEnqueue bool
+		test               string
+		newNode            *v1.Node
+		oldNode            *v1.Node
+		ds                 *apps.DaemonSet
+		expectedEventsFunc func(strategyType apps.DaemonSetUpdateStrategyType) int
+		shouldEnqueue      bool
 	}{
 		{
 			test: "Nothing changed, should not enqueue",
@@ -2086,6 +2087,32 @@ func TestUpdateNode(t *testing.T) {
 			ds:            newDaemonSet("ds"),
 			shouldEnqueue: true,
 		},
+		{
+			test:    "Node Allocatable changed",
+			oldNode: newNode("node1", nil),
+			newNode: func() *v1.Node {
+				node := newNode("node1", nil)
+				node.Status.Allocatable = allocatableResources("200M", "200m")
+				return node
+			}(),
+			ds: func() *apps.DaemonSet {
+				ds := newDaemonSet("ds")
+				ds.Spec.Template.Spec = resourcePodSpecWithoutNodeName("200M", "200m")
+				return ds
+			}(),
+			expectedEventsFunc: func(strategyType apps.DaemonSetUpdateStrategyType) int {
+				switch strategyType {
+				case apps.OnDeleteDaemonSetStrategyType:
+					return 2
+				case apps.RollingUpdateDaemonSetStrategyType:
+					return 3
+				default:
+					t.Fatalf("unexpected UpdateStrategy %+v", strategyType)
+				}
+				return 0
+			},
+			shouldEnqueue: true,
+		},
 	}
 	for _, c := range cases {
 		for _, strategy := range updateStrategies() {
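The new case calls `resourcePodSpecWithoutNodeName`, a test helper added elsewhere in the PR and not shown in this excerpt. By analogy with the `allocatableResources` helper seen above, a plausible shape would be the following hypothetical sketch, not the PR's exact code:

```go
// Hypothetical reconstruction of the helper used above; the real definition
// lives outside this excerpt. It sets no NodeName, leaving placement to the
// DaemonSet controller rather than pinning the pod to a node.
package daemon

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func resourcePodSpecWithoutNodeName(memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse(memory),
					v1.ResourceCPU:    resource.MustParse(cpu),
				},
			},
		}},
	}
}
```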
@@ -2096,7 +2123,12 @@ func TestUpdateNode(t *testing.T) {
 			manager.nodeStore.Add(c.oldNode)
 			c.ds.Spec.UpdateStrategy = *strategy
 			manager.dsStore.Add(c.ds)
-			syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0)
+
+			expectedEvents := 0
+			if c.expectedEventsFunc != nil {
+				expectedEvents = c.expectedEventsFunc(strategy.Type)
+			}
+			syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, expectedEvents)
 
 			manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
 				if ds.Name == "ds" {