Mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #91915 from tnozicka/fix-ds-recreate

Fix DS expectations on recreate

Commit: 694566d06d
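In short: the DaemonSet controller keys its pod create/delete expectations by namespace/name. When a DaemonSet was deleted and recreated under the same name, the new object inherited the old one's unsatisfied expectations, so its syncs were skipped until the stale entry timed out. This change clears expectations in the delete handler and, because informers do not deliver a delete event when an object is recreated between relists (see the TODO in the diff below), synthesizes one from the update handler whenever the UID changes.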
pkg/controller/daemon/BUILD:

@@ -55,6 +55,7 @@ go_test(
     name = "go_default_test",
     srcs = [
         "daemon_controller_test.go",
+        "init_test.go",
         "update_test.go",
     ],
     embed = [":go_default_library"],
@@ -74,6 +75,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
@@ -82,6 +84,7 @@ go_test(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
pkg/controller/daemon/daemon_controller.go:

@@ -163,17 +163,8 @@ func NewDaemonSetsController(
 	}

 	daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			ds := obj.(*apps.DaemonSet)
-			klog.V(4).Infof("Adding daemon set %s", ds.Name)
-			dsc.enqueueDaemonSet(ds)
-		},
-		UpdateFunc: func(old, cur interface{}) {
-			oldDS := old.(*apps.DaemonSet)
-			curDS := cur.(*apps.DaemonSet)
-			klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
-			dsc.enqueueDaemonSet(curDS)
-		},
-		DeleteFunc: dsc.deleteDaemonset,
+		AddFunc:    dsc.addDaemonset,
+		UpdateFunc: dsc.updateDaemonset,
+		DeleteFunc: dsc.deleteDaemonset,
 	})
 	dsc.dsLister = daemonSetInformer.Lister()
@@ -231,22 +222,59 @@ func indexByPodNodeName(obj interface{}) ([]string, error) {
 	return []string{pod.Spec.NodeName}, nil
 }

+func (dsc *DaemonSetsController) addDaemonset(obj interface{}) {
+	ds := obj.(*apps.DaemonSet)
+	klog.V(4).Infof("Adding daemon set %s", ds.Name)
+	dsc.enqueueDaemonSet(ds)
+}
+
+func (dsc *DaemonSetsController) updateDaemonset(cur, old interface{}) {
+	oldDS := old.(*apps.DaemonSet)
+	curDS := cur.(*apps.DaemonSet)
+
+	// TODO: make a KEP and fix informers to always call the delete event handler on re-create
+	if curDS.UID != oldDS.UID {
+		key, err := controller.KeyFunc(oldDS)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", oldDS, err))
+			return
+		}
+		dsc.deleteDaemonset(cache.DeletedFinalStateUnknown{
+			Key: key,
+			Obj: oldDS,
+		})
+	}
+
+	klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
+	dsc.enqueueDaemonSet(curDS)
+}
+
 func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 	ds, ok := obj.(*apps.DaemonSet)
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
+			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
 			return
 		}
 		ds, ok = tombstone.Obj.(*apps.DaemonSet)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a DaemonSet %#v", obj))
 			return
 		}
 	}
 	klog.V(4).Infof("Deleting daemon set %s", ds.Name)
-	dsc.enqueueDaemonSet(ds)
+
+	key, err := controller.KeyFunc(ds)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", ds, err))
+		return
+	}
+
+	// Delete expectations for the DaemonSet so if we create a new one with the same name it starts clean
+	dsc.expectations.DeleteExpectations(key)
+
+	dsc.queue.Add(key)
 }

 // Run begins watching and syncing daemon sets.
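For context, dsc.expectations is the generic controller-utils bookkeeping that records how many pod creations and deletions a sync has dispatched, and suppresses further syncs until they are observed or time out. A minimal sketch of why a stale entry under a reused namespace/name key stalls a recreated DaemonSet; the key and counts are illustrative, and the API shown is pkg/controller as of this commit:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	exp := controller.NewControllerExpectations()
	key := "kube-system/node-exporter" // illustrative namespace/name key

	// A sync for the old DaemonSet dispatched 5 pod creations...
	exp.ExpectCreations(key, 5)
	// ...but the object was deleted before all of them were observed.
	exp.CreationObserved(key)

	// A recreated DaemonSet maps to the same key. Without DeleteExpectations,
	// its syncs are skipped until the stale entry times out
	// (controller.ExpectationsTimeout, 5 minutes).
	fmt.Println(exp.SatisfiedExpectations(key)) // false: 4 creations still pending

	// This is what the new deleteDaemonset handler does:
	exp.DeleteExpectations(key)
	fmt.Println(exp.SatisfiedExpectations(key)) // true: the new object starts clean
}
```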
pkg/controller/daemon/daemon_controller_test.go: diff suppressed because it is too large.

pkg/controller/daemon/init_test.go (new file, 25 lines):

@@ -0,0 +1,25 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package daemon
+
+import (
+	"k8s.io/klog/v2"
+)
+
+func init() {
+	klog.InitFlags(nil)
+}
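Registering klog's flags in an init function makes the controller's klog.V(4) traces reachable when debugging these tests, e.g. with an invocation along the lines of (illustrative):

    go test ./pkg/controller/daemon/ -run TestDaemonSetUpdatesPods -args -v=4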
pkg/controller/daemon/update_test.go:

@@ -34,36 +34,66 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
 	}
 	maxUnavailable := 2
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)

 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }

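Note the reworked helper contract visible at these call sites: syncAndValidateDaemonSets dropped its *testing.T parameter and now returns an error, leaving each caller to decide between t.Fatal for setup steps (dsStore.Add/Update) and t.Error for validation steps. The helper's definition lives in the suppressed daemon_controller_test.go diff. The same pattern repeats in the remaining hunks below.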
@@ -75,24 +105,42 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)

 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}

 	// new pods are not ready numUnavailable == maxUnavailable
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }

@@ -104,23 +152,41 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}

 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}

 	// all old pods are unavailable so should be removed
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 5, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}

 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }

@@ -132,17 +198,29 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}

 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}

 	// template is not changed no pod should be removed
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }
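Finally, the recreate scenario itself is presumably exercised in the suppressed daemon_controller_test.go. The signal the new updateDaemonset handler keys on is simply a changed UID under an unchanged namespace/name; a freestanding illustration, with all object details hypothetical:

```go
package daemon

import (
	"testing"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
)

// Illustrative only: through an informer, a delete+recreate that happens
// between relists surfaces as a single update whose UID differs.
func TestRecreateSurfacesAsUIDChange(t *testing.T) {
	oldDS := &apps.DaemonSet{ObjectMeta: metav1.ObjectMeta{
		Namespace: "default",
		Name:      "ds",
		UID:       uuid.NewUUID(),
	}}
	newDS := oldDS.DeepCopy()
	newDS.UID = uuid.NewUUID() // a recreated object gets a fresh UID

	if newDS.UID == oldDS.UID {
		t.Fatal("expected the recreated DaemonSet to carry a new UID")
	}
}
```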