mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-09-21 09:57:52 +00:00
Wire contexts to Batch controllers (#105491)
* Wire contexts to Batch controllers
* (hold) feedback + updates that overlap with Apps controllers
* fixup errors
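The pattern behind every hunk below is the same: each controller-facing method gains a leading ctx context.Context parameter, and callers pass their own context straight through rather than letting the callee mint one. A minimal sketch of that threading, with toy stand-in names (podControl/syncNodes here are illustrative, not the real controller code):

package main

import (
	"context"
	"fmt"
)

// podControl is a toy stand-in for the controller's pod-control interface.
type podControl struct{}

// CreatePods now receives the caller's ctx, so a cancelled sync can stop
// issuing work instead of running to completion on a dead request.
func (pc *podControl) CreatePods(ctx context.Context, namespace string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		fmt.Println("creating pods in", namespace)
		return nil
	}
}

// syncNodes forwards the same ctx it was given; no context.TODO() in between.
func syncNodes(ctx context.Context, pc *podControl, namespace string) error {
	return pc.CreatePods(ctx, namespace)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := syncNodes(ctx, &podControl{}, "kube-system"); err != nil {
		fmt.Println("sync failed:", err)
	}
}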
@@ -933,7 +933,7 @@ func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet,
 	podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...)
 
 	// Label new pods using the hash label value of the current history when creating them
-	if err = dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil {
+	if err = dsc.syncNodes(ctx, ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil {
 		return err
 	}
@@ -942,7 +942,7 @@ func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet,
 
 // syncNodes deletes given pods and creates new daemon set pods on the given nodes
 // returns slice with errors if any
-func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
+func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
 	// We need to set expectations before creating/deleting pods to avoid race conditions.
 	dsKey, err := controller.KeyFunc(ds)
 	if err != nil {
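The race the comment in this hunk guards against: if pods were created first and expectations recorded afterwards, an informer event for a new pod could trigger a second sync that double-creates. A toy sketch of the expectation-counting idea (hypothetical type; the real controller.ControllerExpectations is keyed by dsKey and richer than this):

package main

import (
	"fmt"
	"sync/atomic"
)

// expectations counts creates we have announced but not yet observed.
type expectations struct{ pending int64 }

// ExpectCreations runs BEFORE the pods are created, so any event that
// races in still sees a nonzero pending count.
func (e *expectations) ExpectCreations(n int) { atomic.AddInt64(&e.pending, int64(n)) }

// CreationObserved is what the informer's pod-add handler calls.
func (e *expectations) CreationObserved() { atomic.AddInt64(&e.pending, -1) }

// Satisfied gates the next sync: don't act again until the cache catches up.
func (e *expectations) Satisfied() bool { return atomic.LoadInt64(&e.pending) <= 0 }

func main() {
	e := &expectations{}
	e.ExpectCreations(2)
	fmt.Println(e.Satisfied()) // false: a concurrent sync would back off
	e.CreationObserved()
	e.CreationObserved()
	fmt.Println(e.Satisfied()) // true: safe to compute a fresh diff
}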
@@ -996,7 +996,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 			podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
 				podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
 
-			err := dsc.podControl.CreatePods(ds.Namespace, podTemplate,
+			err := dsc.podControl.CreatePods(ctx, ds.Namespace, podTemplate,
 				ds, metav1.NewControllerRef(ds, controllerKind))
 
 			if err != nil {
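For context on the unchanged lines in this hunk: util.ReplaceDaemonSetPodNodeNameNodeAffinity pins each daemon pod to its target node by rewriting the pod's required node affinity to a metadata.name field match, so the scheduler places the pod rather than the controller setting spec.nodeName directly. Roughly the shape it produces, sketched with k8s.io/api types (a paraphrase of the result, not the helper's actual code; the package name is hypothetical):

package util

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nodeNameAffinity builds a required node affinity that matches only the
// node whose metadata.name equals nodeName.
func nodeNameAffinity(nodeName string) *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      metav1.ObjectNameField, // "metadata.name"
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{nodeName},
					}},
				}},
			},
		},
	}
}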
@@ -1032,7 +1032,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 	for i := 0; i < deleteDiff; i++ {
 		go func(ix int) {
 			defer deleteWait.Done()
-			if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
+			if err := dsc.podControl.DeletePod(ctx, ds.Namespace, podsToDelete[ix], ds); err != nil {
 				dsc.expectations.DeletionObserved(dsKey)
 				if !apierrors.IsNotFound(err) {
 					klog.V(2).Infof("Failed deletion, decremented expectations for set %q/%q", ds.Namespace, ds.Name)
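The loop above fans deletions out to goroutines; deleteWait is a sync.WaitGroup joined after the loop, and a failed delete rolls the expectation back (DeletionObserved) so the controller does not wait for an informer event that will never arrive. A stripped-down sketch of that fan-out shape, with toy names and errors collected on a buffered channel:

package main

import (
	"context"
	"fmt"
	"sync"
)

// deletePod is a toy stand-in for podControl.DeletePod.
func deletePod(ctx context.Context, name string) error {
	fmt.Println("deleting", name)
	return nil
}

func main() {
	podsToDelete := []string{"ds-pod-a", "ds-pod-b", "ds-pod-c"}
	errCh := make(chan error, len(podsToDelete))

	var deleteWait sync.WaitGroup
	deleteWait.Add(len(podsToDelete))
	for i := 0; i < len(podsToDelete); i++ {
		go func(ix int) {
			defer deleteWait.Done()
			if err := deletePod(context.Background(), podsToDelete[ix]); err != nil {
				// In the real controller this is where the expectation is
				// decremented before the error is recorded.
				errCh <- err
			}
		}(i)
	}
	deleteWait.Wait()
	close(errCh)
	for err := range errCh {
		fmt.Println("delete failed:", err)
	}
}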
@@ -254,10 +254,10 @@ func newFakePodControl() *fakePodControl {
 	}
 }
 
-func (f *fakePodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (f *fakePodControl) CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	f.Lock()
 	defer f.Unlock()
-	if err := f.FakePodControl.CreatePods(namespace, template, object, controllerRef); err != nil {
+	if err := f.FakePodControl.CreatePods(ctx, namespace, template, object, controllerRef); err != nil {
 		return fmt.Errorf("failed to create pod for DaemonSet")
 	}
@@ -282,10 +282,10 @@ func (f *fakePodControl) CreatePods(namespace string, template *v1.PodTemplateSp
 	return nil
 }
 
-func (f *fakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
+func (f *fakePodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
 	f.Lock()
 	defer f.Unlock()
-	if err := f.FakePodControl.DeletePod(namespace, podID, object); err != nil {
+	if err := f.FakePodControl.DeletePod(ctx, namespace, podID, object); err != nil {
 		return fmt.Errorf("failed to delete pod %q", podID)
 	}
 	pod, ok := f.podIDMap[podID]
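Both test hunks above follow the same test-double recipe: embed the shared controller.FakePodControl for bookkeeping, serialize access with a mutex, delegate to the embedded fake, then layer test-specific recording or canned failures on top. In miniature (the `deleted` field is a hypothetical simplification; the real fake in this file also tracks pod templates per node):

package daemon

import (
	"context"
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/controller"
)

// fakePodControl-style double, reduced to its skeleton.
type fakePodControl struct {
	sync.Mutex
	controller.FakePodControl
	deleted []string // pod names this test observed being deleted
}

func (f *fakePodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
	f.Lock()
	defer f.Unlock()
	// Delegate first so the shared fake's counters stay accurate.
	if err := f.FakePodControl.DeletePod(ctx, namespace, podID, object); err != nil {
		return fmt.Errorf("failed to delete pod %q", podID)
	}
	f.deleted = append(f.deleted, podID)
	return nil
}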
@@ -123,7 +123,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae
 		}
 		oldPodsToDelete := append(allowedReplacementPods, candidatePodsToDelete[:remainingUnavailable]...)
 
-		return dsc.syncNodes(ds, oldPodsToDelete, nil, hash)
+		return dsc.syncNodes(ctx, ds, oldPodsToDelete, nil, hash)
 	}
 
 	// When surging, we create new pods whenever an old pod is unavailable, and we can create up
@@ -201,7 +201,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae
 	}
 	newNodesToCreate := append(allowedNewNodes, candidateNewNodes[:remainingSurge]...)
 
-	return dsc.syncNodes(ds, oldPodsToDelete, newNodesToCreate, hash)
+	return dsc.syncNodes(ctx, ds, oldPodsToDelete, newNodesToCreate, hash)
 }
 
 // findUpdatedPodsOnNode looks at non-deleted pods on a given node and returns true if there
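The two rollingUpdate call sites above share one budgeting idea: take every pod that may be replaced for free (already unavailable, or on a node that needs a new pod anyway), then dip into the healthy candidates only up to the remaining maxUnavailable or maxSurge headroom. The slicing in isolation, with hypothetical numbers mirroring the candidatePodsToDelete[:remainingUnavailable] expression:

package main

import "fmt"

func main() {
	maxUnavailable, numUnavailable := 3, 1
	allowedReplacementPods := []string{"pod-already-down"}         // unavailable anyway: free to replace
	candidatePodsToDelete := []string{"pod-a", "pod-b", "pod-c"}   // healthy: deleting these costs budget

	// Remaining budget: how many healthy pods we may still take down.
	remainingUnavailable := maxUnavailable - numUnavailable
	if remainingUnavailable < 0 {
		remainingUnavailable = 0
	}
	if remainingUnavailable > len(candidatePodsToDelete) {
		remainingUnavailable = len(candidatePodsToDelete)
	}

	oldPodsToDelete := append(allowedReplacementPods, candidatePodsToDelete[:remainingUnavailable]...)
	fmt.Println(oldPodsToDelete) // [pod-already-down pod-a pod-b]
}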