diff --git a/pkg/controller/daemon/controller.go b/pkg/controller/daemon/controller.go
index 41cf4af0d07..0c4907ecc34 100644
--- a/pkg/controller/daemon/controller.go
+++ b/pkg/controller/daemon/controller.go
@@ -177,6 +177,8 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
 // Run begins watching and syncing daemon sets.
 func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
 	defer util.HandleCrash()
+	glog.Infof("Starting Daemon Sets controller manager")
+	controller.SyncAllPodsWithStore(dsc.kubeClient, dsc.podStore.Store)
 	go dsc.dsController.Run(stopCh)
 	go dsc.podController.Run(stopCh)
 	go dsc.nodeController.Run(stopCh)
@@ -461,7 +463,6 @@ func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *extensions.Dae
 	if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
 		return nil
 	}
-
 	var updateErr, getErr error
 	for i := 0; i <= StatusUpdateRetries; i++ {
 		ds.Status.DesiredNumberScheduled = desiredNumberScheduled
diff --git a/pkg/controller/daemon/controller_test.go b/pkg/controller/daemon/controller_test.go
index d86505c8ef5..072e9e0b326 100644
--- a/pkg/controller/daemon/controller_test.go
+++ b/pkg/controller/daemon/controller_test.go
@@ -18,6 +18,7 @@ package daemon
 
 import (
 	"fmt"
+	"net/http/httptest"
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
@@ -27,7 +28,9 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
+	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 )
 
 var (
@@ -342,3 +345,39 @@ func TestDSManagerNotReady(t *testing.T) {
 	manager.podStoreSynced = alwaysReady
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
 }
+
+func TestDSManagerInit(t *testing.T) {
+	// Insert a stable daemon set and make sure we don't create an extra pod
+	// for the one node which already has a daemon after a simulated restart.
+	ds := newDaemonSet("test")
+	ds.Status = extensions.DaemonSetStatus{
+		CurrentNumberScheduled: 1,
+		NumberMisscheduled:     0,
+		DesiredNumberScheduled: 1,
+	}
+	nodeName := "only-node"
+	podList := &api.PodList{
+		Items: []api.Pod{
+			*newPod("podname", nodeName, simpleDaemonSetLabel),
+		}}
+	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
+	fakeHandler := utiltesting.FakeHandler{
+		StatusCode:   200,
+		ResponseBody: response,
+	}
+	testServer := httptest.NewServer(&fakeHandler)
+	// TODO: Uncomment when fix #19254
+	// defer testServer.Close()
+
+	client := client.NewOrDie(&client.Config{Host: testServer.URL, GroupVersion: testapi.Default.GroupVersion()})
+	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
+	manager.dsStore.Add(ds)
+	manager.nodeStore.Add(newNode(nodeName, nil))
+	manager.podStoreSynced = alwaysReady
+	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
+
+	fakePodControl := &controller.FakePodControl{}
+	manager.podControl = fakePodControl
+	manager.syncHandler(getKey(ds, t))
+	validateSyncDaemonSets(t, fakePodControl, 0, 0)
+}
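
Note: controller.SyncAllPodsWithStore, which Run now calls before starting the informers, is defined elsewhere in pkg/controller and is not part of this diff. The standalone Go sketch below illustrates the idea only; it is not the actual Kubernetes helper, and the podLister/podStore interfaces, function names, and retry period are placeholders invented for illustration. The point it demonstrates: block until one full pod list succeeds, then seed the store, so the level-triggered sync loop already sees pre-existing daemon pods after a controller restart and does not create duplicates.

package main

import (
	"fmt"
	"time"
)

// podLister stands in for the subset of the API client the helper needs:
// a way to list every pod in the cluster.
type podLister interface {
	listAllPods() ([]string, error)
}

// podStore stands in for the subset of cache.Store the helper needs:
// replace the cached contents with a freshly listed set.
type podStore interface {
	replace(pods []string)
}

// syncAllPodsWithStore lists all pods once, retrying on transient errors,
// and seeds the store before any watch events arrive. Because the daemon
// sets controller is level triggered, the first syncHandler run then
// already sees existing daemon pods and creates no extras.
func syncAllPodsWithStore(c podLister, s podStore) {
	for {
		pods, err := c.listAllPods()
		if err != nil {
			fmt.Printf("error listing pods for store sync, retrying: %v\n", err)
			time.Sleep(100 * time.Millisecond) // placeholder poll period
			continue
		}
		s.replace(pods)
		return
	}
}

// In-memory fakes so the sketch runs standalone.
type fakeClient struct{ pods []string }

func (f *fakeClient) listAllPods() ([]string, error) { return f.pods, nil }

type fakeStore struct{ pods []string }

func (f *fakeStore) replace(pods []string) { f.pods = pods }

func main() {
	c := &fakeClient{pods: []string{"kube-system/daemon-abc12"}}
	s := &fakeStore{}
	syncAllPodsWithStore(c, s)
	fmt.Println("store seeded with:", s.pods)
}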