DaemonSet: Use ControllerRefManager to adopt/orphan.

Anthony Yeh 2017-02-25 16:22:54 -08:00
parent 2217363845
commit 421e0bbd83
4 changed files with 163 additions and 103 deletions
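For context on the title: ControllerRefManager centralizes the adopt/orphan decisions a controller makes over the pods its selector covers. The sketch below illustrates those semantics only; the `pod` struct and `claim` function are made-up stand-ins, not the real `ClaimPods` API.

```go
package main

import "fmt"

// Hypothetical stand-ins for illustration; the real logic lives in
// pkg/controller.PodControllerRefManager.ClaimPods.
type pod struct {
	name     string
	ownerUID string // UID of the owning controller; "" means orphaned
	matches  bool   // whether the pod still matches the DaemonSet's selector
}

// claim mirrors the per-pod adopt/orphan decision.
func claim(p *pod, dsUID string) bool {
	switch {
	case p.ownerUID == dsUID && p.matches:
		return true // ours and still selected: keep
	case p.ownerUID == dsUID && !p.matches:
		p.ownerUID = "" // ours but no longer selected: release (orphan)
		return false
	case p.ownerUID == "" && p.matches:
		p.ownerUID = dsUID // matching orphan: adopt
		return true
	default:
		return false // owned by another controller: leave it alone
	}
}

func main() {
	for _, p := range []*pod{
		{"owned-matching", "ds-1", true},
		{"owned-unselected", "ds-1", false},
		{"orphan-matching", "", true},
		{"owned-by-other", "ds-2", true},
	} {
		fmt.Printf("%s: claimed=%v\n", p.name, claim(p, "ds-1"))
	}
}
```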

@@ -73,6 +73,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/intstr",
"//vendor:k8s.io/apimachinery/pkg/util/uuid",
"//vendor:k8s.io/apiserver/pkg/storage/names",
"//vendor:k8s.io/apiserver/pkg/util/feature",
"//vendor:k8s.io/client-go/testing",

@@ -465,32 +465,37 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
}
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
nodeToDaemonPods := make(map[string][]*v1.Pod)
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
}
daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
// List all pods to include those that don't match the selector anymore but
// have a ControllerRef pointing to this controller.
pods, err := dsc.podLister.Pods(ds.Namespace).List(labels.Everything())
if err != nil {
return nodeToDaemonPods, err
return nil, err
}
for i := range daemonPods {
// TODO: Do we need to copy here?
daemonPod := &(*daemonPods[i])
nodeName := daemonPod.Spec.NodeName
nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], daemonPod)
// Use ControllerRefManager to adopt/orphan as needed.
cm := controller.NewPodControllerRefManager(dsc.podControl, ds, selector, ControllerKind)
claimedPods, err := cm.ClaimPods(pods)
if err != nil {
return nil, err
}
// Group Pods by Node name.
nodeToDaemonPods := make(map[string][]*v1.Pod)
for _, pod := range claimedPods {
nodeName := pod.Spec.NodeName
nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
}
return nodeToDaemonPods, nil
}
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
// Find out which nodes are running the daemon pods selected by ds.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
nodeList, err := dsc.nodeLister.List(labels.Everything())
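Two details in the hunk above are worth calling out. The pod list now uses labels.Everything() instead of the DaemonSet's selector, because a pod whose labels were edited out of the selector can still carry a ControllerRef to this DaemonSet and must be seen in order to be orphaned. After claiming, pods are grouped by spec.nodeName; a minimal stand-alone sketch of that grouping (plain strings instead of *v1.Pod):

```go
package main

import "fmt"

func main() {
	// Claimed daemon pods, keyed by the node they were scheduled to.
	claimed := []struct{ name, nodeName string }{
		{"ds-pod-a", "node-0"},
		{"ds-pod-b", "node-1"},
		{"ds-pod-c", "node-1"}, // a surplus pod that manage() will delete
	}
	nodeToDaemonPods := make(map[string][]string)
	for _, p := range claimed {
		nodeToDaemonPods[p.nodeName] = append(nodeToDaemonPods[p.nodeName], p.name)
	}
	fmt.Println(nodeToDaemonPods) // map[node-0:[ds-pod-a] node-1:[ds-pod-b ds-pod-c]]
}
```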
@@ -589,15 +594,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
for i := 0; i < createDiff; i++ {
go func(ix int) {
defer createWait.Done()
isController := true
controllerRef := &metav1.OwnerReference{
APIVersion: controllerKind.GroupVersion().String(),
Kind: controllerKind.Kind,
Name: ds.Name,
UID: ds.UID,
Controller: &isController,
}
if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, controllerRef); err != nil {
if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, newControllerRef(ds)); err != nil {
glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey)
errCh <- err
@@ -676,12 +673,8 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
return updateErr
}
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) error {
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
glog.V(4).Infof("Updating daemon set status")
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
@@ -758,6 +751,12 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
return nil
}
// Find out which nodes are running the daemon pods controlled by ds.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
// Don't process a daemon set until all its creations and deletions have been processed.
// For example if daemon set foo asked for 3 new daemon pods in the previous call to manage,
// then we do not want to call manage on foo until the daemon pods have been created.
@@ -767,7 +766,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}
dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
if dsNeedsSync && ds.DeletionTimestamp == nil {
if err := dsc.manage(ds); err != nil {
if err := dsc.manage(ds, nodeToDaemonPods); err != nil {
return err
}
}
@@ -776,14 +775,14 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
if dsNeedsSync && ds.DeletionTimestamp == nil {
switch ds.Spec.UpdateStrategy.Type {
case extensions.RollingUpdateDaemonSetStrategyType:
err = dsc.rollingUpdate(ds)
err = dsc.rollingUpdate(ds, nodeToDaemonPods)
}
if err != nil {
return err
}
}
return dsc.updateDaemonSetStatus(ds)
return dsc.updateDaemonSetStatus(ds, nodeToDaemonPods)
}
// nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
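Taken together, these hunks restructure syncDaemonSet so the pods are listed and claimed once per sync, with the resulting map threaded through manage, rollingUpdate, and updateDaemonSetStatus. A toy sketch of the refactored flow, with hypothetical stand-in types:

```go
package main

import "fmt"

type podsByNode map[string][]string

// getNodesToDaemonPods stands in for the controller method: in the real
// code this is also where adoption/orphaning happens, exactly once per sync.
func getNodesToDaemonPods() podsByNode {
	return podsByNode{"node-0": {"ds-pod-a"}}
}

func manage(m podsByNode) error        { fmt.Println("manage:", m); return nil }
func rollingUpdate(m podsByNode) error { fmt.Println("rollingUpdate:", m); return nil }
func updateStatus(m podsByNode) error  { fmt.Println("updateStatus:", m); return nil }

func main() {
	m := getNodesToDaemonPods() // one list+claim, shared by all phases below
	if err := manage(m); err != nil {
		return
	}
	if err := rollingUpdate(m); err != nil {
		return
	}
	_ = updateStatus(m)
}
```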
@@ -972,6 +971,18 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
return len(predicateFails) == 0, predicateFails, nil
}
// newControllerRef creates a ControllerRef pointing to the given DaemonSet.
func newControllerRef(ds *extensions.DaemonSet) *metav1.OwnerReference {
isController := true
return &metav1.OwnerReference{
APIVersion: ControllerKind.GroupVersion().String(),
Kind: ControllerKind.Kind,
Name: ds.Name,
UID: ds.UID,
Controller: &isController,
}
}
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*extensions.DaemonSet
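The newControllerRef helper above replaces the OwnerReference literal that syncNodes used to build inline. For illustration, the entry it stamps on each daemon pod looks roughly like this; the name and UID are invented, and extensions/v1beta1 is the DaemonSet API group/version at this point in history:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	isController := true
	ref := metav1.OwnerReference{
		APIVersion: "extensions/v1beta1", // ControllerKind.GroupVersion().String()
		Kind:       "DaemonSet",          // ControllerKind.Kind
		Name:       "foo",                // example DaemonSet name
		UID:        "1234-abcd",          // example DaemonSet UID
		Controller: &isController,
	}
	fmt.Printf("ownerReferences: %+v\n", ref)
}
```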

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apiserver/pkg/storage/names"
utilfeature "k8s.io/apiserver/pkg/util/feature"
core "k8s.io/client-go/testing"
@@ -80,6 +81,7 @@ func newDaemonSet(name string) *extensions.DaemonSet {
return &extensions.DaemonSet{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: metav1.NamespaceDefault,
},
@@ -130,7 +132,7 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
}
}
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
@@ -152,18 +154,21 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
},
}
pod.Name = names.SimpleNameGenerator.GenerateName(podName)
if ds != nil {
pod.OwnerReferences = []metav1.OwnerReference{*newControllerRef(ds)}
}
return pod
}
func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
for i := 0; i < number; i++ {
podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label))
podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds))
}
}
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
for i := 0; i < number; i++ {
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label)
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
pod.Status = v1.PodStatus{Phase: v1.PodFailed}
podStore.Add(pod)
}
@@ -618,13 +623,13 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
func TestDealsWithExistingPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore, 0, 5, nil)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 2)
addPods(manager.podStore, "node-3", simpleDaemonSetLabel, 5)
addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, 2)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 5, nil)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2)
addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
}
@@ -642,34 +647,34 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
manager, podControl, _ := newTestController()
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 5, nil)
addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, 2)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 1)
addPods(manager.podStore, "node-4", simpleDaemonSetLabel, 1)
daemon := newDaemonSet("foo")
daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4)
}
// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore, 0, 5, nil)
addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 2)
addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 4)
addPods(manager.podStore, "node-6", simpleDaemonSetLabel, 13)
addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, 4)
addPods(manager.podStore, "node-9", simpleDaemonSetLabel, 1)
addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, 1)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 5, nil)
addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2)
addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4)
addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13)
addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4)
addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1)
addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
}
@@ -756,7 +761,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
}
func TestNumberReadyStatus(t *testing.T) {
daemon := newDaemonSet("foo")
ds := newDaemonSet("foo")
manager, podControl, clientset := newTestController()
var updated *extensions.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -769,31 +774,31 @@ func TestNumberReadyStatus(t *testing.T) {
return false, nil, nil
})
addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
manager.dsStore.Add(daemon)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
if updated.Status.NumberReady != 0 {
t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
}
selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
daemonPods, _ := manager.podLister.Pods(daemon.Namespace).List(selector)
selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector)
for _, pod := range daemonPods {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
pod.Status.Conditions = append(pod.Status.Conditions, condition)
}
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
if updated.Status.NumberReady != 2 {
t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
}
}
func TestObservedGeneration(t *testing.T) {
daemon := newDaemonSet("foo")
daemon.Generation = 1
ds := newDaemonSet("foo")
ds.Generation = 1
manager, podControl, clientset := newTestController()
var updated *extensions.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -807,12 +812,12 @@ func TestObservedGeneration(t *testing.T) {
})
addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
manager.dsStore.Add(daemon)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
if updated.Status.ObservedGeneration != daemon.Generation {
t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, daemon.Generation, updated.Status.ObservedGeneration)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
if updated.Status.ObservedGeneration != ds.Generation {
t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
}
}
@@ -832,11 +837,11 @@ func TestDaemonKillFailedPods(t *testing.T) {
for _, test := range tests {
t.Logf("test case: %s\n", test.test)
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore, 0, 1, nil)
addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numFailedPods)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numNormalPods)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 1, nil)
addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods)
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods)
syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
}
}
@@ -1183,3 +1188,59 @@ func TestUpdateNode(t *testing.T) {
}
}
}
func TestGetNodesToDaemonPods(t *testing.T) {
manager, _, _ := newTestController()
ds := newDaemonSet("foo")
ds2 := newDaemonSet("foo2")
manager.dsStore.Add(ds)
manager.dsStore.Add(ds2)
addNodes(manager.nodeStore, 0, 2, nil)
// These pods should be returned.
wantedPods := []*v1.Pod{
newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
}
failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds)
failedPod.Status = v1.PodStatus{Phase: v1.PodFailed}
wantedPods = append(wantedPods, failedPod)
for _, pod := range wantedPods {
manager.podStore.Add(pod)
}
// These pods should be ignored.
ignoredPods := []*v1.Pod{
newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
}
for _, pod := range ignoredPods {
manager.podStore.Add(pod)
}
nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds)
if err != nil {
t.Fatalf("getNodesToDaemonPods() error: %v", err)
}
gotPods := map[string]bool{}
for node, pods := range nodesToDaemonPods {
for _, pod := range pods {
if pod.Spec.NodeName != node {
t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName)
}
gotPods[pod.Name] = true
}
}
for _, pod := range wantedPods {
if !gotPods[pod.Name] {
t.Errorf("expected pod %v but didn't get it", pod.Name)
}
delete(gotPods, pod.Name)
}
for podName := range gotPods {
t.Errorf("unexpected pod %v was returned", podName)
}
}
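Note that the test expects the failed pod to be returned: getNodesToDaemonPods deliberately includes owned, matching pods regardless of phase, since manage() must see failed daemon pods in order to delete and replace them (compare TestDaemonKillFailedPods above). A minimal sketch of that downstream handling, with phases as plain strings:

```go
package main

import "fmt"

func main() {
	// name -> phase for daemon pods found on one node
	podsOnNode := map[string]string{
		"daemon-pod-ok":     "Running",
		"daemon-pod-failed": "Failed",
	}
	var podsToDelete []string
	for name, phase := range podsOnNode {
		if phase == "Failed" {
			podsToDelete = append(podsToDelete, name)
		}
	}
	fmt.Println("to delete:", podsToDelete)
}
```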

@@ -31,11 +31,9 @@ import (
// rollingUpdate deletes old daemon set pods making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
newPods, oldPods, err := dsc.getAllDaemonSetPods(ds)
allPods := append(oldPods, newPods...)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, allPods)
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
_, oldPods, err := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
if err != nil {
return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
}
@@ -67,29 +65,23 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
return utilerrors.NewAggregate(errors)
}
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet) ([]*v1.Pod, []*v1.Pod, error) {
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) ([]*v1.Pod, []*v1.Pod, error) {
var newPods []*v1.Pod
var oldPods []*v1.Pod
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return newPods, oldPods, err
}
daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
if err != nil {
return newPods, oldPods, fmt.Errorf("Couldn't get list of pods for daemon set %#v: %v", ds, err)
}
for _, pod := range daemonPods {
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
for _, pods := range nodeToDaemonPods {
for _, pod := range pods {
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
}
}
}
return newPods, oldPods, nil
}
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, allPods []*v1.Pod) (int, int, error) {
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())
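getAllDaemonSetPods now partitions the already-claimed pods rather than listing again, splitting them on util.IsPodUpdated. A sketch of that check, assuming it compares the pod's pod-template-generation label against the DaemonSet's TemplateGeneration (a simplification of the real helper):

```go
package main

import "fmt"

// Assumed simplification of util.IsPodUpdated: a pod is "new" if its
// pod-template-generation label matches the DaemonSet's TemplateGeneration.
func isPodUpdated(templateGeneration int64, labels map[string]string) bool {
	gen, ok := labels["pod-template-generation"]
	return ok && gen == fmt.Sprint(templateGeneration)
}

func main() {
	fmt.Println(isPodUpdated(2, map[string]string{"pod-template-generation": "2"})) // true
	fmt.Println(isPodUpdated(2, map[string]string{"pod-template-generation": "1"})) // false
	fmt.Println(isPodUpdated(2, map[string]string{}))                               // false
}
```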
@@ -97,11 +89,6 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet,
return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
}
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return -1, -1, fmt.Errorf("couldn't get node to daemon pods mapping for daemon set %#v: %v", ds, err)
}
var numUnavailable, desiredNumberScheduled int
for i := range nodeList {
node := nodeList[i]