Wire contexts to Core controllers

Mike Dame
2021-04-22 14:27:59 -04:00
parent 657412713b
commit 4960d0976a
61 changed files with 842 additions and 780 deletions

pkg/controller/podgc/gc_controller.go

@@ -61,7 +61,7 @@ type PodGCController struct {
terminatedPodThreshold int
}
-func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer,
+func NewPodGC(ctx context.Context, kubeClient clientset.Interface, podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer, terminatedPodThreshold int) *PodGCController {
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
ratelimiter.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -76,30 +76,30 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
deletePod: func(namespace, name string) error {
klog.InfoS("PodGC is force deleting Pod", "pod", klog.KRef(namespace, name))
-return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
+return kubeClient.CoreV1().Pods(namespace).Delete(ctx, name, *metav1.NewDeleteOptions(0))
},
}
return gcc
}
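Note that ctx here is captured by the deletePod closure above, so the context handed to NewPodGC is reused for every force deletion the controller issues later. A minimal standalone sketch of that capture pattern (newPodDeleter is a hypothetical helper, assuming the file's existing context, clientset and metav1 imports):

// newPodDeleter mirrors the deletePod field: the constructor's context is
// captured once and reused by every subsequent call to the closure.
func newPodDeleter(ctx context.Context, c clientset.Interface) func(namespace, name string) error {
	return func(namespace, name string) error {
		return c.CoreV1().Pods(namespace).Delete(ctx, name, *metav1.NewDeleteOptions(0))
	}
}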
-func (gcc *PodGCController) Run(stop <-chan struct{}) {
+func (gcc *PodGCController) Run(ctx context.Context) {
defer utilruntime.HandleCrash()
klog.Infof("Starting GC controller")
defer gcc.nodeQueue.ShutDown()
defer klog.Infof("Shutting down GC controller")
-if !cache.WaitForNamedCacheSync("GC", stop, gcc.podListerSynced, gcc.nodeListerSynced) {
+if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) {
return
}
-go wait.Until(gcc.gc, gcCheckPeriod, stop)
+go wait.UntilWithContext(ctx, gcc.gc, gcCheckPeriod)
-<-stop
+<-ctx.Done()
}
-func (gcc *PodGCController) gc() {
+func (gcc *PodGCController) gc(ctx context.Context) {
pods, err := gcc.podLister.List(labels.Everything())
if err != nil {
klog.Errorf("Error while listing all pods: %v", err)
@@ -113,7 +113,7 @@ func (gcc *PodGCController) gc() {
if gcc.terminatedPodThreshold > 0 {
gcc.gcTerminated(pods)
}
-gcc.gcOrphaned(pods, nodes)
+gcc.gcOrphaned(ctx, pods, nodes)
gcc.gcUnscheduledTerminating(pods)
}
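gc gains a context parameter because Run above switches from wait.Until to wait.UntilWithContext, whose callback must accept a context: the signature is UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration), and the loop stops once ctx is done. A minimal standalone illustration (not part of this commit):

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Invokes the callback on the given period until cancel() is called
// or the context otherwise ends.
wait.UntilWithContext(ctx, func(ctx context.Context) {
	klog.Info("periodic work")
}, 20*time.Second)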
@@ -157,7 +157,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
}
// gcOrphaned deletes pods that are bound to nodes that don't exist.
-func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
+func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, nodes []*v1.Node) {
klog.V(4).Infof("GC'ing orphaned")
existingNodeNames := sets.NewString()
for _, node := range nodes {
@@ -170,7 +170,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
}
}
// Check if nodes are still missing after quarantine period
-deletedNodesNames, quit := gcc.discoverDeletedNodes(existingNodeNames)
+deletedNodesNames, quit := gcc.discoverDeletedNodes(ctx, existingNodeNames)
if quit {
return
}
@@ -188,7 +188,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
}
}
-func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String) (sets.String, bool) {
+func (gcc *PodGCController) discoverDeletedNodes(ctx context.Context, existingNodeNames sets.String) (sets.String, bool) {
deletedNodesNames := sets.NewString()
for gcc.nodeQueue.Len() > 0 {
item, quit := gcc.nodeQueue.Get()
@@ -197,7 +197,7 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
}
nodeName := item.(string)
if !existingNodeNames.Has(nodeName) {
-exists, err := gcc.checkIfNodeExists(nodeName)
+exists, err := gcc.checkIfNodeExists(ctx, nodeName)
switch {
case err != nil:
klog.ErrorS(err, "Error while getting node", "node", nodeName)
@@ -211,8 +211,8 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
return deletedNodesNames, false
}
-func (gcc *PodGCController) checkIfNodeExists(name string) (bool, error) {
-_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
+func (gcc *PodGCController) checkIfNodeExists(ctx context.Context, name string) (bool, error) {
+_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if errors.IsNotFound(fetchErr) {
return false, nil
}
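With a live context now reaching the apiserver call, cancelling it mid-request makes the Get fail instead of hanging; client-go generally surfaces this as an error wrapping context.Canceled. A hypothetical caller-side check (sketch only; stderrors aliases the standard-library errors package, since this file imports apimachinery's errors under that name):

// Assumes: import stderrors "errors"
exists, err := gcc.checkIfNodeExists(ctx, nodeName)
if err != nil && stderrors.Is(err, context.Canceled) {
	return deletedNodesNames, true // shutting down: quit discovery early
}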

pkg/controller/podgc/gc_controller_test.go

@@ -43,7 +43,7 @@ func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (
informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
nodeInformer := informerFactory.Core().V1().Nodes()
-controller := NewPodGC(kubeClient, podInformer, nodeInformer, terminatedPodThreshold)
+controller := NewPodGC(context.TODO(), kubeClient, podInformer, nodeInformer, terminatedPodThreshold)
controller.podListerSynced = alwaysReady
return controller, podInformer, nodeInformer
}
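The tests thread context.TODO() since none of them exercises cancellation. A test that wanted to assert shutdown behavior could pass a cancelable context instead (hypothetical sketch, not part of this commit; fake is client-go's kubernetes/fake package):

ctx, cancel := context.WithCancel(context.Background())
gcc, _, _ := NewFromClient(fake.NewSimpleClientset(), -1)
go gcc.Run(ctx)
cancel() // Run should observe ctx.Done() and return promptly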
@@ -145,7 +145,7 @@ func TestGCTerminated(t *testing.T) {
})
}
-gcc.gc()
+gcc.gc(context.TODO())
if pass := compareStringSetToList(test.deletedPodNames, deletedPodNames); !pass {
t.Errorf("[%v]pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v",
@@ -336,7 +336,7 @@ func TestGCOrphaned(t *testing.T) {
}
// First GC of orphaned pods
-gcc.gc()
+gcc.gc(context.TODO())
if len(deletedPodNames) > 0 {
t.Errorf("no pods should be deleted at this point.\n\tactual: %v", deletedPodNames)
}
@@ -367,7 +367,7 @@ func TestGCOrphaned(t *testing.T) {
}
// Actual pod deletion
-gcc.gc()
+gcc.gc(context.TODO())
if pass := compareStringSetToList(test.deletedPodNames, deletedPodNames); !pass {
t.Errorf("pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v",
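Taken together, the changes let one context own the controller's full lifecycle, from construction through the API calls it makes while running. A rough end-to-end sketch of how a controller-manager-style caller might drive the new signatures (runPodGC is a hypothetical name; assumes the usual client-go clientset, informers and pkg/controller/podgc imports; 12500 mirrors the documented --terminated-pod-gc-threshold default):

func runPodGC(kubeClient clientset.Interface, factory informers.SharedInformerFactory) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	gcc := podgc.NewPodGC(ctx, kubeClient,
		factory.Core().V1().Pods(),
		factory.Core().V1().Nodes(),
		12500) // terminated-pod GC threshold

	factory.Start(ctx.Done())
	gcc.Run(ctx) // blocks until cancel() fires or ctx is otherwise done
}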