DaemonSet updates
Implements the DaemonSet update design proposal (https://github.com/kubernetes/community/blob/master/contributors/design-proposals/daemonset-update.md) for feature https://github.com/kubernetes/features/issues/124.
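For context, the sketch below (not part of this commit) shows how a DaemonSet object would opt into the rolling-update behaviour introduced here, using the same extensions/v1beta1 fields the new tests exercise; the container image and the maxUnavailable value of 1 are illustrative only.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/api/v1"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
    ds := &extensions.DaemonSet{}
    ds.Spec.Template.Spec.Containers = []v1.Container{{Name: "daemon", Image: "foo/bar"}}

    // Opt into rolling updates: delete and recreate at most one daemon pod at a time.
    maxUnavailable := intstr.FromInt(1)
    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
    ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable}

    // A template change is accompanied by a TemplateGeneration bump (the new tests in this
    // commit bump it by hand); the controller uses it to tell old pods from updated ones.
    ds.TemplateGeneration++

    fmt.Println(ds.Spec.UpdateStrategy.Type, ds.TemplateGeneration)
}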
pkg/controller/daemon/BUILD (modified)

@@ -13,6 +13,7 @@ go_library(
    srcs = [
        "daemoncontroller.go",
        "doc.go",
        "update.go",
    ],
    tags = ["automanaged"],
    deps = [
@@ -26,6 +27,8 @@ go_library(
        "//pkg/client/listers/core/v1:go_default_library",
        "//pkg/client/listers/extensions/v1beta1:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/daemon/util:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
@@ -35,6 +38,7 @@ go_library(
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/intstr",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/client-go/kubernetes/typed/core/v1",
@@ -47,7 +51,10 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["daemoncontroller_test.go"],
    srcs = [
        "daemoncontroller_test.go",
        "update_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
@@ -78,6 +85,9 @@ filegroup(

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    srcs = [
        ":package-srcs",
        "//pkg/controller/daemon/util:all-srcs",
    ],
    tags = ["automanaged"],
)
pkg/controller/daemon/daemoncontroller.go (modified)

@@ -44,6 +44,7 @@ import (
    corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
    extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/daemon/util"
    "k8s.io/kubernetes/pkg/util/metrics"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
@@ -255,6 +256,17 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
    dsc.queue.Add(key)
}

func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after time.Duration) {
    key, err := controller.KeyFunc(obj)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
        return
    }

    // TODO: Handle overlapping controllers better. See comment in ReplicationManager.
    dsc.queue.AddAfter(key, after)
}

func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
    // look up in the cache, if cached and the cache is valid, just return cached value
    if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
@@ -342,8 +354,14 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
        return
    }
    glog.V(4).Infof("Pod %s updated.", curPod.Name)
    changedToReady := !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod)
    if curDS := dsc.getPodDaemonSet(curPod); curDS != nil {
        dsc.enqueueDaemonSet(curDS)

        // See https://github.com/kubernetes/kubernetes/pull/38076 for more details
        if changedToReady && curDS.Spec.MinReadySeconds > 0 {
            dsc.enqueueDaemonSetAfter(curDS, time.Duration(curDS.Spec.MinReadySeconds)*time.Second)
        }
    }
    // If the labels have not changed, then the daemon set responsible for
    // the pod is the same as it was before. In that case we have enqueued the daemon
@@ -521,11 +539,23 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
            }
        }
    }
    errors := dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods)

    // Throw an error when the daemon pods fail, to use ratelimiter to prevent kill-recreate hot loop
    if failedPodsObserved > 0 {
        errors = append(errors, fmt.Errorf("deleted %d failed pods of DaemonSet %s/%s", failedPodsObserved, ds.Namespace, ds.Name))
    }

    return utilerrors.NewAggregate(errors)
}

// syncNodes deletes given pods and creates new daemon set pods on the given node
// returns slice with erros if any
func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string) []error {
    // We need to set expectations before creating/deleting pods to avoid race conditions.
    dsKey, err := controller.KeyFunc(ds)
    if err != nil {
        return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
        return []error{fmt.Errorf("couldn't get key for object %#v: %v", ds, err)}
    }

    createDiff := len(nodesNeedingDaemonPods)
@@ -546,10 +576,11 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
    glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
    createWait := sync.WaitGroup{}
    createWait.Add(createDiff)
    template := util.GetPodTemplateWithGeneration(ds.Spec.Template, ds.TemplateGeneration)
    for i := 0; i < createDiff; i++ {
        go func(ix int) {
            defer createWait.Done()
            if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &ds.Spec.Template, ds); err != nil {
            if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds); err != nil {
                glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                dsc.expectations.CreationObserved(dsKey)
                errCh <- err
@@ -581,18 +612,17 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
    for err := range errCh {
        errors = append(errors, err)
    }
    // Throw an error when the daemon pods fail, to use ratelimiter to prevent kill-recreate hot loop
    if failedPodsObserved > 0 {
        errors = append(errors, fmt.Errorf("deleted %d failed pods of DaemonSet %s/%s", failedPodsObserved, ds.Namespace, ds.Name))
    }
    return utilerrors.NewAggregate(errors)
    return errors
}

func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int) error {
func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
    if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
        int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
        int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
        int(ds.Status.NumberReady) == numberReady &&
        int(ds.Status.UpdatedNumberScheduled) == updatedNumberScheduled &&
        int(ds.Status.NumberAvailable) == numberAvailable &&
        int(ds.Status.NumberUnavailable) == numberUnavailable &&
        ds.Status.ObservedGeneration >= ds.Generation {
        return nil
    }
@@ -611,6 +641,9 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
    toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
    toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled)
    toUpdate.Status.NumberReady = int32(numberReady)
    toUpdate.Status.UpdatedNumberScheduled = int32(updatedNumberScheduled)
    toUpdate.Status.NumberAvailable = int32(numberAvailable)
    toUpdate.Status.NumberUnavailable = int32(numberUnavailable)

    if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil {
        return nil
@@ -638,7 +671,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
        return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
    }

    var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
    var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
    for i := range nodeList {
        node := nodeList[i]
        wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
@@ -652,11 +685,18 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
        desiredNumberScheduled++
        if scheduled {
            currentNumberScheduled++
            // Sort the daemon pods by creation time, so the the oldest is first.
            // Sort the daemon pods by creation time, so that the oldest is first.
            daemonPods, _ := nodeToDaemonPods[node.Name]
            sort.Sort(podByCreationTimestamp(daemonPods))
            if v1.IsPodReady(daemonPods[0]) {
            pod := daemonPods[0]
            if v1.IsPodReady(pod) {
                numberReady++
                if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
                    numberAvailable++
                }
            }
            if util.IsPodUpdated(ds.TemplateGeneration, pod) {
                updatedNumberScheduled++
            }
        }
    } else {
@@ -665,8 +705,9 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
            }
        }
    }
    numberUnavailable := desiredNumberScheduled - numberAvailable

    err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady)
    err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
    if err != nil {
        return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
    }
@@ -714,6 +755,17 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
        }
    }

    dsNeedsSync = dsc.expectations.SatisfiedExpectations(dsKey)
    if dsNeedsSync && ds.DeletionTimestamp == nil {
        switch ds.Spec.UpdateStrategy.Type {
        case extensions.RollingUpdateDaemonSetStrategyType:
            err = dsc.rollingUpdate(ds)
        }
        if err != nil {
            return err
        }
    }

    return dsc.updateDaemonSetStatus(ds)
}
pkg/controller/daemon/daemoncontroller_test.go (modified)

@@ -18,11 +18,13 @@ package daemon

import (
    "fmt"
    "sync"
    "testing"

    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apiserver/pkg/storage/names"
    core "k8s.io/client-go/testing"
    "k8s.io/client-go/tools/cache"
@@ -166,6 +168,63 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin
    }
}

type fakePodControl struct {
    sync.Mutex
    *controller.FakePodControl
    podStore cache.Store
    podIDMap map[string]*v1.Pod
}

func newFakePodControl() *fakePodControl {
    podIDMap := make(map[string]*v1.Pod)
    return &fakePodControl{
        FakePodControl: &controller.FakePodControl{},
        podIDMap:       podIDMap}
}

func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
    f.Lock()
    defer f.Unlock()
    if err := f.FakePodControl.CreatePodsOnNode(nodeName, namespace, template, object); err != nil {
        return fmt.Errorf("failed to create pod on node %q", nodeName)
    }

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Labels:       template.Labels,
            Namespace:    namespace,
            GenerateName: fmt.Sprintf("%s-", nodeName),
        },
    }

    if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil {
        return fmt.Errorf("unable to convert pod template: %v", err)
    }
    if len(nodeName) != 0 {
        pod.Spec.NodeName = nodeName
    }
    pod.Name = names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-", nodeName))

    f.podStore.Update(pod)
    f.podIDMap[pod.Name] = pod
    return nil
}

func (f *fakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
    f.Lock()
    defer f.Unlock()
    if err := f.FakePodControl.DeletePod(namespace, podID, object); err != nil {
        return fmt.Errorf("failed to delete pod %q", podID)
    }
    pod, ok := f.podIDMap[podID]
    if !ok {
        return fmt.Errorf("pod %q does not exist", podID)
    }
    f.podStore.Delete(pod)
    delete(f.podIDMap, podID)
    return nil
}

type daemonSetsController struct {
    *DaemonSetsController

@@ -174,7 +233,7 @@ type daemonSetsController struct {
    nodeStore cache.Store
}

func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *controller.FakePodControl, *fake.Clientset) {
func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *fakePodControl, *fake.Clientset) {
    clientset := fake.NewSimpleClientset(initialObjects...)
    informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())

@@ -190,8 +249,9 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
    manager.podStoreSynced = alwaysReady
    manager.nodeStoreSynced = alwaysReady
    manager.dsStoreSynced = alwaysReady
    podControl := &controller.FakePodControl{}
    podControl := newFakePodControl()
    manager.podControl = podControl
    podControl.podStore = informerFactory.Core().V1().Pods().Informer().GetStore()

    return &daemonSetsController{
        manager,
@@ -201,7 +261,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
    }, podControl, clientset
}

func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
func validateSyncDaemonSets(t *testing.T, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int) {
    if len(fakePodControl.Templates) != expectedCreates {
        t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
    }
@@ -210,7 +270,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont
    }
}

func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int) {
    key, err := controller.KeyFunc(ds)
    if err != nil {
        t.Errorf("Could not get key for daemon.")
@@ -219,6 +279,18 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *
    validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes)
}

// clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) {
    fakePodControl.Clear()

    key, err := controller.KeyFunc(ds)
    if err != nil {
        t.Errorf("Could not get key for daemon.")
        return
    }
    manager.expectations.DeleteExpectations(key)
}

func TestDeleteFinalStateUnknown(t *testing.T) {
    manager, _, _ := newTestController()
    addNodes(manager.nodeStore, 0, 1, nil)
@@ -231,6 +303,15 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
    }
}

func markPodsReady(store cache.Store) {
    // mark pods as ready
    for _, obj := range store.List() {
        pod := obj.(*v1.Pod)
        condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
        v1.UpdatePodCondition(&pod.Status, &condition)
    }
}

// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
    manager, podControl, _ := newTestController()
@@ -927,3 +1008,114 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
        }
    }
}

func TestDaemonSetUpdatesPods(t *testing.T) {
    manager, podControl, _ := newTestController()
    maxUnavailable := 2
    addNodes(manager.nodeStore, 0, 5, nil)
    ds := newDaemonSet("foo")
    manager.dsStore.Add(ds)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
    markPodsReady(podControl.podStore)

    ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
    intStr := intstr.FromInt(maxUnavailable)
    ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
    ds.TemplateGeneration++
    manager.dsStore.Update(ds)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)
    markPodsReady(podControl.podStore)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)
    markPodsReady(podControl.podStore)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1)
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
    markPodsReady(podControl.podStore)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
    clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
    manager, podControl, _ := newTestController()
    maxUnavailable := 3
    addNodes(manager.nodeStore, 0, 5, nil)
    ds := newDaemonSet("foo")
    manager.dsStore.Add(ds)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
    markPodsReady(podControl.podStore)

    ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
    intStr := intstr.FromInt(maxUnavailable)
    ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
    ds.TemplateGeneration++
    manager.dsStore.Update(ds)

    // new pods are not ready numUnavailable == maxUnavailable
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
    clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
    manager, podControl, _ := newTestController()
    maxUnavailable := 3
    addNodes(manager.nodeStore, 0, 5, nil)
    ds := newDaemonSet("foo")
    manager.dsStore.Add(ds)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)

    ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
    intStr := intstr.FromInt(maxUnavailable)
    ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
    ds.TemplateGeneration++
    manager.dsStore.Update(ds)

    // all old pods are unavailable so should be removed
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5)
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)

    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
    clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
    manager, podControl, _ := newTestController()
    maxUnavailable := 3
    addNodes(manager.nodeStore, 0, 5, nil)
    ds := newDaemonSet("foo")
    manager.dsStore.Add(ds)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)

    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
    intStr := intstr.FromInt(maxUnavailable)
    ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
    manager.dsStore.Update(ds)

    // template is not changed no pod should be removed
    clearExpectations(t, manager, ds, podControl)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
    clearExpectations(t, manager, ds, podControl)
}
pkg/controller/daemon/update.go (new file, 137 lines)
@@ -0,0 +1,137 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
    "fmt"

    "github.com/golang/glog"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/api/v1"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/controller/daemon/util"
)

// rollingUpdate deletes old daemon set pods making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
    newPods, oldPods, err := dsc.getAllDaemonSetPods(ds)
    allPods := append(oldPods, newPods...)

    maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, allPods)
    if err != nil {
        return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
    }
    oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)

    // for oldPods delete all not running pods
    var podsToDelete []string
    glog.V(4).Infof("Marking all unavailable old pods for deletion")
    for _, pod := range oldUnavailablePods {
        // Skip terminating pods. We won't delete them again
        if pod.DeletionTimestamp != nil {
            continue
        }
        glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        podsToDelete = append(podsToDelete, pod.Name)
    }

    glog.V(4).Infof("Marking old pods for deletion")
    for _, pod := range oldAvailablePods {
        if numUnavailable >= maxUnavailable {
            glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
            break
        }
        glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        podsToDelete = append(podsToDelete, pod.Name)
        numUnavailable++
    }
    errors := dsc.syncNodes(ds, podsToDelete, []string{})
    return utilerrors.NewAggregate(errors)
}

func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet) ([]*v1.Pod, []*v1.Pod, error) {
    var newPods []*v1.Pod
    var oldPods []*v1.Pod

    selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
    if err != nil {
        return newPods, oldPods, err
    }
    daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
    if err != nil {
        return newPods, oldPods, fmt.Errorf("Couldn't get list of pods for daemon set %#v: %v", ds, err)
    }
    for _, pod := range daemonPods {
        if util.IsPodUpdated(ds.TemplateGeneration, pod) {
            newPods = append(newPods, pod)
        } else {
            oldPods = append(oldPods, pod)
        }
    }
    return newPods, oldPods, nil
}

func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, allPods []*v1.Pod) (int, int, error) {
    glog.V(4).Infof("Getting unavailable numbers")
    // TODO: get nodeList once in syncDaemonSet and pass it to other functions
    nodeList, err := dsc.nodeLister.List(labels.Everything())
    if err != nil {
        return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
    }

    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
    if err != nil {
        return -1, -1, fmt.Errorf("couldn't get node to daemon pods mapping for daemon set %#v: %v", ds, err)
    }

    var numUnavailable, desiredNumberScheduled int
    for i := range nodeList {
        node := nodeList[i]
        wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
        if err != nil {
            return -1, -1, err
        }
        if !wantToRun {
            continue
        }
        desiredNumberScheduled++
        daemonPods, exists := nodeToDaemonPods[node.Name]
        if !exists {
            numUnavailable++
            continue
        }
        available := false
        for _, pod := range daemonPods {
            if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
                available = true
                break
            }
        }
        if !available {
            numUnavailable++
        }
    }
    maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
    if err != nil {
        return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
    }
    return maxUnavailable, numUnavailable, nil
}
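A note on the MaxUnavailable handling above: intstrutil.GetValueFromIntOrPercent accepts either an absolute count or a percentage, and because the controller passes roundUp=true a percentage is rounded up against desiredNumberScheduled. A small standalone sketch, with made-up counts, not part of this commit:

package main

import (
    "fmt"

    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    desiredNumberScheduled := 5

    // An integer MaxUnavailable is used as-is.
    asInt := intstrutil.FromInt(2)
    v, _ := intstrutil.GetValueFromIntOrPercent(&asInt, desiredNumberScheduled, true)
    fmt.Println(v) // 2

    // A percentage is scaled by the desired count and rounded up (roundUp=true),
    // so 30% of 5 nodes allows 2 pods to be unavailable at a time.
    asPercent := intstrutil.FromString("30%")
    v, _ = intstrutil.GetValueFromIntOrPercent(&asPercent, desiredNumberScheduled, true)
    fmt.Println(v) // 2
}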
pkg/controller/daemon/update_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
    "testing"

    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetUpdatesWithRollingUpdate(t *testing.T) {
    manager, podControl, _ := newTestController()
    addNodes(manager.nodeStore, 0, 5, nil)
    ds := newDaemonSet("foo")
    manager.dsStore.Add(ds)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
    // change strategy type to RollingUpdate
    ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
}
pkg/controller/daemon/util/BUILD (new file, 33 lines)
@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["daemonset_util.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//pkg/util/labels:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
pkg/controller/daemon/util/daemonset_util.go (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// GetPodTemplateWithHash returns copy of provided template with additional
// label which contains hash of provided template
func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec {
    obj, _ := api.Scheme.DeepCopy(template)
    newTemplate := obj.(v1.PodTemplateSpec)
    templateGenerationStr := fmt.Sprint(generation)
    newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
        template.ObjectMeta.Labels,
        extensions.DaemonSetTemplateGenerationKey,
        templateGenerationStr,
    )
    return newTemplate
}

// IsPodUpdate checks if pod contains label with provided hash
func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod) bool {
    podTemplateGeneration, generationExists := pod.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
    dsTemplateGenerationStr := fmt.Sprint(dsTemplateGeneration)
    return generationExists && podTemplateGeneration == dsTemplateGenerationStr
}

// SplitByAvailablePods splits provided daemon set pods by availabilty
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
    unavailablePods := []*v1.Pod{}
    availablePods := []*v1.Pod{}
    for _, pod := range pods {
        if v1.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
            availablePods = append(availablePods, pod)
        } else {
            unavailablePods = append(unavailablePods, pod)
        }
    }
    return availablePods, unavailablePods
}
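To show how these helpers are intended to pair up (an illustrative sketch, not part of the commit; the generation value is arbitrary): syncNodes stamps the pod template with the DaemonSet's TemplateGeneration via GetPodTemplateWithGeneration, and rollingUpdate later relies on IsPodUpdated to tell updated pods from old ones.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/controller/daemon/util"
)

func main() {
    var generation int64 = 3

    // Stamp a template with the current generation, as syncNodes does before creating pods.
    tmpl := util.GetPodTemplateWithGeneration(v1.PodTemplateSpec{}, generation)

    // A pod created from that template carries the generation label, so it counts as updated.
    pod := &v1.Pod{}
    pod.ObjectMeta.Labels = tmpl.ObjectMeta.Labels
    fmt.Println(util.IsPodUpdated(generation, pod)) // true

    // After the template changes and the generation is bumped, the same pod counts as old
    // and becomes a candidate for deletion in rollingUpdate.
    fmt.Println(util.IsPodUpdated(generation+1, pod)) // false
}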
pkg/controller/daemon/util/daemonset_util_test.go (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/apis/extensions"
)

func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
    pod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
        ObjectMeta: metav1.ObjectMeta{
            Labels:    label,
            Namespace: metav1.NamespaceDefault,
        },
        Spec: v1.PodSpec{
            NodeName: nodeName,
            Containers: []v1.Container{
                {
                    Image: "foo/bar",
                },
            },
        },
    }
    pod.Name = podName
    return pod
}

func TestIsPodUpdated(t *testing.T) {
    tests := []struct {
        templateGeneration int64
        pod                *v1.Pod
        isUpdated          bool
    }{
        {
            int64(12345),
            newPod("pod1", "node1", map[string]string{extensions.DaemonSetTemplateGenerationKey: "12345"}),
            true,
        },
        {
            int64(12355),
            newPod("pod1", "node1", map[string]string{extensions.DaemonSetTemplateGenerationKey: "12345"}),
            false,
        },
        {
            int64(12355),
            newPod("pod1", "node1", map[string]string{}),
            false,
        },
    }
    for _, test := range tests {
        updated := IsPodUpdated(test.templateGeneration, test.pod)
        if updated != test.isUpdated {
            t.Errorf("IsPodUpdated returned wrong value. Expected %t, got %t. TemplateGeneration: %d", test.isUpdated, updated, test.templateGeneration)
        }
    }
}

func TestGetPodTemplateWithRevision(t *testing.T) {
    generation := int64(1)
    podTemplateSpec := v1.PodTemplateSpec{}
    newPodTemplate := GetPodTemplateWithGeneration(podTemplateSpec, generation)
    label, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
    if !exists || label != fmt.Sprint(generation) {
        t.Errorf("Error in getting podTemplateSpec with label geneartion. Exists: %t, label: %s", exists, label)
    }
}