Wire contexts to Apps controllers

Mike Dame
2021-04-22 14:14:52 -04:00
parent 38746752af
commit 41fcb95f2f
36 changed files with 404 additions and 375 deletions
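
This change replaces stop-channel plumbing in the DaemonSet controller with context.Context: Run, the worker loop, the sync handler, and the helpers shown below now take a ctx, and the client-go calls receive it instead of context.TODO(). A caller that still holds a stop channel can bridge it to a context; the sketch below is illustrative only (the helper name and wiring are not part of this commit, and apimachinery's wait package offers a ContextForChannel helper in the same spirit).

```go
// Minimal sketch, assuming a caller that still owns a stop channel and needs
// to drive the new Run(ctx, workers) signature. Not part of this commit.
package main

import (
	"context"
	"time"
)

// contextFromStopCh cancels the returned context once stopCh is closed.
func contextFromStopCh(stopCh <-chan struct{}) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		defer cancel()
		<-stopCh
	}()
	return ctx
}

func main() {
	stopCh := make(chan struct{})
	ctx := contextFromStopCh(stopCh)

	// Old call site:  dsc.Run(2, stopCh)
	// New call site:  dsc.Run(ctx, 2)
	// Inside Run, ctx.Done() now plays the role stopCh used to play.

	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopCh) // simulate shutdown
	}()
	<-ctx.Done()
}
```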

View File

@@ -94,7 +94,7 @@ type DaemonSetsController struct {
burstReplicas int
// To allow injection of syncDaemonSet for testing.
-syncHandler func(dsKey string) error
+syncHandler func(ctx context.Context, dsKey string) error
// used for unit testing
enqueueDaemonSet func(ds *apps.DaemonSet)
// A TTLCache of pod creates/deletes each ds expects to see
@@ -277,40 +277,40 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
}
// Run begins watching and syncing daemon sets.
-func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
+func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
defer dsc.queue.ShutDown()
klog.Infof("Starting daemon sets controller")
defer klog.Infof("Shutting down daemon sets controller")
-if !cache.WaitForNamedCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
+if !cache.WaitForNamedCacheSync("daemon sets", ctx.Done(), dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
return
}
for i := 0; i < workers; i++ {
-go wait.Until(dsc.runWorker, time.Second, stopCh)
+go wait.UntilWithContext(ctx, dsc.runWorker, time.Second)
}
-go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, stopCh)
+go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, ctx.Done())
-<-stopCh
+<-ctx.Done()
}
-func (dsc *DaemonSetsController) runWorker() {
-for dsc.processNextWorkItem() {
+func (dsc *DaemonSetsController) runWorker(ctx context.Context) {
+for dsc.processNextWorkItem(ctx) {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
-func (dsc *DaemonSetsController) processNextWorkItem() bool {
+func (dsc *DaemonSetsController) processNextWorkItem(ctx context.Context) bool {
dsKey, quit := dsc.queue.Get()
if quit {
return false
}
defer dsc.queue.Done(dsKey)
-err := dsc.syncHandler(dsKey.(string))
+err := dsc.syncHandler(ctx, dsKey.(string))
if err == nil {
dsc.queue.Forget(dsKey)
return true
@@ -711,7 +711,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) {
+func (dsc *DaemonSetsController) getDaemonPods(ctx context.Context, ds *apps.DaemonSet) ([]*v1.Pod, error) {
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
@@ -725,8 +725,8 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
-dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
+dsNotDeleted := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -738,15 +738,15 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
// Use ControllerRefManager to adopt/orphan as needed.
cm := controller.NewPodControllerRefManager(dsc.podControl, ds, selector, controllerKind, dsNotDeleted)
-return cm.ClaimPods(pods)
+return cm.ClaimPods(ctx, pods)
}
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) created for the nodes.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
-claimedPods, err := dsc.getDaemonPods(ds)
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
+claimedPods, err := dsc.getDaemonPods(ctx, ds)
if err != nil {
return nil, err
}
@@ -910,9 +910,9 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
// After figuring out which nodes should run a Pod of ds but not yet running one and
// which nodes should not run a Pod of ds but currently running one, it calls function
// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
-func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
+func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
// Find out the pods which are created for the nodes by DaemonSet.
-nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -1053,7 +1053,17 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
return utilerrors.NewAggregate(errors)
}
-func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int, updateObservedGen bool) error {
+func storeDaemonSetStatus(
+ctx context.Context,
+dsClient unversionedapps.DaemonSetInterface,
+ds *apps.DaemonSet, desiredNumberScheduled,
+currentNumberScheduled,
+numberMisscheduled,
+numberReady,
+updatedNumberScheduled,
+numberAvailable,
+numberUnavailable int,
+updateObservedGen bool) error {
if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
@@ -1080,7 +1090,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
toUpdate.Status.NumberAvailable = int32(numberAvailable)
toUpdate.Status.NumberUnavailable = int32(numberUnavailable)
-if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}); updateErr == nil {
+if _, updateErr = dsClient.UpdateStatus(ctx, toUpdate, metav1.UpdateOptions{}); updateErr == nil {
return nil
}
@@ -1089,7 +1099,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
break
}
// Update the set with the latest resource version for the next poll
-if toUpdate, getErr = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}); getErr != nil {
+if toUpdate, getErr = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}); getErr != nil {
// If the GET fails we can't trust status.Replicas anymore. This error
// is bound to be more interesting than the update failure.
return getErr
@@ -1098,9 +1108,9 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
return updateErr
}
-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
klog.V(4).Infof("Updating daemon set status")
-nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -1143,7 +1153,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL
}
numberUnavailable := desiredNumberScheduled - numberAvailable
-err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
+err = storeDaemonSetStatus(ctx, dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
if err != nil {
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
}
@@ -1155,7 +1165,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL
return nil
}
-func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
+func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string) error {
startTime := dsc.failedPodsBackoff.Clock.Now()
defer func() {
@@ -1208,7 +1218,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}
// Construct histories of the DaemonSet, and get the hash of current history
-cur, old, err := dsc.constructHistory(ds)
+cur, old, err := dsc.constructHistory(ctx, ds)
if err != nil {
return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
}
@@ -1216,10 +1226,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
if !dsc.expectations.SatisfiedExpectations(dsKey) {
// Only update status. Don't raise observedGeneration since controller didn't process object of that generation.
-return dsc.updateDaemonSetStatus(ds, nodeList, hash, false)
+return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, false)
}
-err = dsc.manage(ds, nodeList, hash)
+err = dsc.manage(ctx, ds, nodeList, hash)
if err != nil {
return err
}
@@ -1229,19 +1239,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
switch ds.Spec.UpdateStrategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
case apps.RollingUpdateDaemonSetStrategyType:
-err = dsc.rollingUpdate(ds, nodeList, hash)
+err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
}
if err != nil {
return err
}
}
-err = dsc.cleanupHistory(ds, old)
+err = dsc.cleanupHistory(ctx, ds, old)
if err != nil {
return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
}
-return dsc.updateDaemonSetStatus(ds, nodeList, hash, true)
+return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
}
// nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a

View File

@@ -383,7 +383,7 @@ func expectSyncDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.
t.Fatal("could not get key for daemon")
}
-err = manager.syncHandler(key)
+err = manager.syncHandler(context.TODO(), key)
if err != nil {
t.Log(err)
}
@@ -547,7 +547,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
// create of DS adds to queue, processes
waitForQueueLength(1, "created DS")
-ok := dsc.processNextWorkItem()
+ok := dsc.processNextWorkItem(context.TODO())
if !ok {
t.Fatal("queue is shutting down")
}
@@ -576,7 +576,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
// process updates DS, update adds to queue
waitForQueueLength(1, "updated DS")
-ok = dsc.processNextWorkItem()
+ok = dsc.processNextWorkItem(context.TODO())
if !ok {
t.Fatal("queue is shutting down")
}
@@ -624,7 +624,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
}
waitForQueueLength(1, "recreated DS")
-ok = dsc.processNextWorkItem()
+ok = dsc.processNextWorkItem(context.TODO())
if !ok {
t.Fatal("Queue is shutting down!")
}
@@ -2797,7 +2797,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
}
}
-nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds)
+nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds)
if err != nil {
t.Fatalf("getNodesToDaemonPods() error: %v", err)
}
@@ -3552,7 +3552,7 @@ func TestStoreDaemonSetStatus(t *testing.T) {
}
return true, ds, nil
})
-if err := storeDaemonSetStatus(fakeClient.AppsV1().DaemonSets("default"), ds, 2, 2, 2, 2, 2, 2, 2, true); err != tt.expectedError {
+if err := storeDaemonSetStatus(context.TODO(), fakeClient.AppsV1().DaemonSets("default"), ds, 2, 2, 2, 2, 2, 2, 2, true); err != tt.expectedError {
t.Errorf("storeDaemonSetStatus() got %v, expected %v", err, tt.expectedError)
}
if getCalled != tt.expectedGetCalled {
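
The tests satisfy the new signatures with context.TODO(). A bounded, auto-cancelled per-test context would work just as well; the helper below is hypothetical and not part of this commit.

```go
package daemon

import (
	"context"
	"testing"
	"time"
)

// testContext is a hypothetical helper: it returns a context that is
// cancelled when the test finishes or after 30 seconds, and could stand in
// for the bare context.TODO() placeholders above.
func testContext(t *testing.T) context.Context {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)
	return ctx
}
```

A test would then call, for example, dsc.processNextWorkItem(testContext(t)) instead of dsc.processNextWorkItem(context.TODO()).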

View File

@@ -40,8 +40,8 @@ import (
// rollingUpdate identifies the set of old pods to delete, or additional pods to create on nodes,
// remaining within the constraints imposed by the update strategy.
-func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
-nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
+nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -234,10 +234,10 @@ func findUpdatedPodsOnNode(ds *apps.DaemonSet, podsOnNode []*v1.Pod, hash string
// constructHistory finds all histories controlled by the given DaemonSet, and
// update current history revision number, or create current history if need to.
// It also deduplicates current history, and adds missing unique labels to existing histories.
-func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
+func (dsc *DaemonSetsController) constructHistory(ctx context.Context, ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
var histories []*apps.ControllerRevision
var currentHistories []*apps.ControllerRevision
-histories, err = dsc.controlledHistories(ds)
+histories, err = dsc.controlledHistories(ctx, ds)
if err != nil {
return nil, nil, err
}
@@ -247,7 +247,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
toUpdate := history.DeepCopy()
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
+history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, nil, err
}
@@ -269,12 +269,12 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
switch len(currentHistories) {
case 0:
// Create a new history if the current one isn't found
-cur, err = dsc.snapshot(ds, currRevision)
+cur, err = dsc.snapshot(ctx, ds, currRevision)
if err != nil {
return nil, nil, err
}
default:
-cur, err = dsc.dedupCurHistories(ds, currentHistories)
+cur, err = dsc.dedupCurHistories(ctx, ds, currentHistories)
if err != nil {
return nil, nil, err
}
@@ -282,7 +282,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
if cur.Revision < currRevision {
toUpdate := cur.DeepCopy()
toUpdate.Revision = currRevision
-_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
+_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, nil, err
}
@@ -291,8 +291,8 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
return cur, old, err
}
-func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
-nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+func (dsc *DaemonSetsController) cleanupHistory(ctx context.Context, ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
+nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -323,7 +323,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.
continue
}
// Clean up
-err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, metav1.DeleteOptions{})
+err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(ctx, history.Name, metav1.DeleteOptions{})
if err != nil {
return err
}
@@ -343,7 +343,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 {
return max
}
-func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) dedupCurHistories(ctx context.Context, ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
if len(curHistories) == 1 {
return curHistories[0], nil
}
@@ -361,7 +361,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
continue
}
// Relabel pods before dedup
-pods, err := dsc.getDaemonPods(ds)
+pods, err := dsc.getDaemonPods(ctx, ds)
if err != nil {
return nil, err
}
@@ -372,14 +372,14 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
-_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
+_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
}
}
// Remove duplicates
-err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, metav1.DeleteOptions{})
+err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(ctx, cur.Name, metav1.DeleteOptions{})
if err != nil {
return nil, err
}
@@ -391,7 +391,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned histories are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) controlledHistories(ctx context.Context, ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
@@ -405,8 +405,8 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
-canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
+canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -417,7 +417,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app
})
// Use ControllerRefManager to adopt/orphan as needed.
cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
-return cm.ClaimControllerRevisions(histories)
+return cm.ClaimControllerRevisions(ctx, histories)
}
// Match check if the given DaemonSet's template matches the template stored in the given history.
@@ -456,7 +456,7 @@ func getPatch(ds *apps.DaemonSet) ([]byte, error) {
return patch, err
}
-func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) snapshot(ctx context.Context, ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
patch, err := getPatch(ds)
if err != nil {
return nil, err
@@ -475,10 +475,10 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
Revision: revision,
}
-history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history, metav1.CreateOptions{})
+history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, history, metav1.CreateOptions{})
if outerErr := err; errors.IsAlreadyExists(outerErr) {
// TODO: Is it okay to get from historyLister?
-existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
+existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
@@ -493,7 +493,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
// Handle name collisions between different history
// Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary
-currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
+currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
@@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
currDS.Status.CollisionCount = new(int32)
}
*currDS.Status.CollisionCount++
-_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS, metav1.UpdateOptions{})
+_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(ctx, currDS, metav1.UpdateOptions{})
if updateErr != nil {
return nil, updateErr
}
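
Because ctx now reaches the client-go calls above (Get, Create, Update, Delete, UpdateStatus), cancelling the controller's context also cancels any in-flight API requests. Below is a standalone sketch of that behaviour against a real clientset; the kubeconfig path, namespace, and DaemonSet name are illustrative and not taken from this commit.

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative wiring: load ~/.kube/config and build a clientset.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// A bounded context: if the API server does not answer within 5s, the
	// request is aborted with a deadline error instead of hanging.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ds, err := client.AppsV1().DaemonSets("kube-system").Get(ctx, "kube-proxy", metav1.GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("observed generation:", ds.Status.ObservedGeneration)
}
```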

View File

@@ -17,6 +17,7 @@ limitations under the License.
package daemon
import (
"context"
"testing"
"time"
@@ -323,7 +324,7 @@ func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count
func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
// Construct histories of the DaemonSet, and get the hash of current history
-cur, _, err := dsc.constructHistory(ds)
+cur, _, err := dsc.constructHistory(context.TODO(), ds)
if err != nil {
return "", err
}