Merge pull request #100265 from ehashman/finish-100010

Migrate pkg/kubelet/kubeletconfig to structured logging
Kubernetes Prow Robot 2021-03-16 14:50:51 -07:00 committed by GitHub
commit 045b5ddd0b
10 changed files with 53 additions and 103 deletions
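
Every hunk below follows the same klog structured-logging migration pattern: printf-style calls (klog.Infof, klog.Warningf, klog.Errorf, and the package's utillog shims) become klog.InfoS or klog.ErrorS with a constant message followed by alternating key/value pairs. A minimal before/after sketch of the pattern, using identifiers from the first hunk below:

    // before: format string interleaves the message and the data
    klog.Errorf("Could not read directory %s: %v", podVolDir, err)
    // after: error first, constant message, then key/value pairs
    klog.ErrorS(err, "Could not read directory", "path", podVolDir)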


@@ -308,13 +308,13 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
return volumes, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
} else if !pathExists {
klog.Warningf("Path %q does not exist", podVolDir)
klog.InfoS("Path does not exist", "path", podVolDir)
return volumes, nil
}
volumePluginDirs, err := ioutil.ReadDir(podVolDir)
if err != nil {
klog.Errorf("Could not read directory %s: %v", podVolDir, err)
klog.ErrorS(err, "Could not read directory", "path", podVolDir)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
@@ -383,7 +383,7 @@ func (kl *Kubelet) getPodVolumeSubpathListFromDisk(podUID types.UID) ([]string,
// Explicitly walks /<volume>/<container name>/<subPathIndex>
volumePluginDirs, err := ioutil.ReadDir(podSubpathsDir)
if err != nil {
klog.Errorf("Could not read directory %s: %v", podSubpathsDir, err)
klog.ErrorS(err, "Could not read directory", "path", podSubpathsDir)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {


@@ -42,7 +42,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con
return nil, nil, fmt.Errorf("failed to find node object, expected a node")
}
allocatable := node.Status.Allocatable
klog.Infof("allocatable: %v", allocatable)
klog.InfoS("Allocatable", "allocatable", allocatable)
outputPod := pod.DeepCopy()
for idx := range outputPod.Spec.Containers {
resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)


@@ -19,6 +19,7 @@ package checkpoint
import (
"context"
"fmt"
+"k8s.io/klog/v2"
"math/rand"
"time"
@@ -35,7 +36,6 @@
"k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
)
// Payload represents a local copy of a config source (payload) object
@@ -178,25 +178,25 @@ func (r *remoteConfigMap) Download(client clientset.Interface, store cache.Store
)
// check the in-memory store for the ConfigMap, so we can skip unnecessary downloads
if store != nil {
utillog.Infof("checking in-memory store for %s", r.APIPath())
klog.InfoS("Kubelet config controller checking in-memory store for remoteConfigMap", "apiPath", r.APIPath())
cm, err = getConfigMapFromStore(store, r.source.ConfigMap.Namespace, r.source.ConfigMap.Name)
if err != nil {
// just log the error, we'll attempt a direct download instead
utillog.Errorf("failed to check in-memory store for %s, error: %v", r.APIPath(), err)
klog.ErrorS(err, "Kubelet config controller failed to check in-memory store for remoteConfigMap", "apiPath", r.APIPath())
} else if cm != nil {
utillog.Infof("found %s in in-memory store, UID: %s, ResourceVersion: %s", r.APIPath(), cm.UID, cm.ResourceVersion)
klog.InfoS("Kubelet config controller found remoteConfigMap in in-memory store", "apiPath", r.APIPath(), "configMapUID", cm.UID, "resourceVersion", cm.ResourceVersion)
} else {
utillog.Infof("did not find %s in in-memory store", r.APIPath())
klog.InfoS("Kubelet config controller did not find remoteConfigMap in in-memory store", "apiPath", r.APIPath())
}
}
// if we didn't find the ConfigMap in the in-memory store, download it from the API server
if cm == nil {
utillog.Infof("attempting to download %s", r.APIPath())
klog.InfoS("Kubelet config controller attempting to download remoteConfigMap", "apiPath", r.APIPath())
cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(context.TODO(), r.source.ConfigMap.Name, metav1.GetOptions{})
if err != nil {
return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err)
}
utillog.Infof("successfully downloaded %s, UID: %s, ResourceVersion: %s", r.APIPath(), cm.UID, cm.ResourceVersion)
klog.InfoS("Kubelet config controller successfully downloaded remoteConfigMap", "apiPath", r.APIPath(), "configMapUID", cm.UID, "resourceVersion", cm.ResourceVersion)
} // Assert: Now we have a non-nil ConfigMap
// construct Payload from the ConfigMap
payload, err := NewConfigMapPayload(cm)
@@ -255,7 +255,7 @@ func getConfigMapFromStore(store cache.Store, namespace, name string) (*apiv1.Co
cm, ok := obj.(*apiv1.ConfigMap)
if !ok {
err := fmt.Errorf("failed to cast object %s from informer's store to ConfigMap", key)
-utillog.Errorf(err.Error())
+klog.ErrorS(err, "Kubelet config controller")
return nil, err
}
return cm, nil


@@ -18,6 +18,7 @@ package store
import (
"fmt"
+"k8s.io/klog/v2"
"path/filepath"
"time"
@@ -25,7 +26,6 @@
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
utilfiles "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
@@ -56,7 +56,7 @@ func NewFsStore(fs utilfs.Filesystem, dir string) Store {
}
func (s *fsStore) Initialize() error {
utillog.Infof("initializing config checkpoints directory %q", s.dir)
klog.InfoS("Kubelet config controller initializing config checkpoints directory", "path", s.dir)
// ensure top-level dir for store
if err := utilfiles.EnsureDir(s.fs, s.dir); err != nil {
return err
@@ -113,7 +113,7 @@ func (s *fsStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.Kub
return nil, fmt.Errorf("no checkpoint for source %s", sourceFmt)
}
// load the kubelet config file
utillog.Infof("loading Kubelet configuration checkpoint for source %s", sourceFmt)
klog.InfoS("Kubelet config controller loading Kubelet configuration checkpoint for source", "apiPath", source.APIPath(), "sourceUID", source.UID(), "resourceVersion", source.ResourceVersion())
loader, err := configfiles.NewFsLoader(s.fs, filepath.Join(s.checkpointPath(source.UID(), source.ResourceVersion()), source.KubeletFilename()))
if err != nil {
return nil, err


@@ -32,7 +32,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
)
const (
@@ -65,7 +64,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
var syncerr error
defer func() {
if syncerr != nil {
-utillog.Errorf(syncerr.Error())
+klog.ErrorS(syncerr, "Kubelet config controller")
cc.pokeConfigSourceWorker()
}
}()
@@ -80,7 +79,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
// a nil source simply means we reset to local defaults
if source == nil {
utillog.Infof("Node.Spec.ConfigSource is empty, will reset assigned and last-known-good to defaults")
klog.InfoS("Kubelet config controller Node.Spec.ConfigSource is empty, will reset assigned and last-known-good to defaults")
if updated, reason, err := cc.resetConfig(); err != nil {
reason = fmt.Sprintf(status.SyncErrorFmt, reason)
cc.configStatus.SetErrorOverride(reason)
@@ -93,7 +92,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
}
// a non-nil source means we should attempt to download the config, and checkpoint it if necessary
utillog.Infof("Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary")
klog.InfoS("Kubelet config controller Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary")
// TODO(mtaufen): It would be nice if we could check the payload's metadata before (re)downloading the whole payload
// we at least try pulling the latest configmap out of the local informer store.
@@ -156,7 +155,7 @@ func (cc *Controller) saveConfigCheckpoint(source checkpoint.RemoteConfigSource,
return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
}
if ok {
utillog.Infof("checkpoint already exists for %s, UID: %s, ResourceVersion: %s", source.APIPath(), payload.UID(), payload.ResourceVersion())
klog.InfoS("Kubelet config controller checkpoint already exists for source", "apiPath", source.APIPath(), "checkpointUID", payload.UID(), "resourceVersion", payload.ResourceVersion())
return "", nil
}
if err := cc.checkpointStore.Save(payload); err != nil {
@@ -198,11 +197,11 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc
// we directly log and send the event, instead of using the event recorder,
// because the event recorder won't flush its queue before we exit (we'd lose the event)
event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message)
-klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
+klog.V(3).InfoS("Event created", "event", klog.KObj(event), "involvedObject", event.InvolvedObject, "eventType", event.Type, "reason", event.Reason, "message", event.Message)
if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event, metav1.CreateOptions{}); err != nil {
utillog.Errorf("failed to send event, error: %v", err)
klog.ErrorS(err, "Kubelet config controller failed to send event")
}
-utillog.Infof(message)
+klog.InfoS("Kubelet config controller event", "message", message)
os.Exit(0)
}
@@ -211,17 +210,17 @@ func latestNodeConfigSource(store cache.Store, nodeName string) (*apiv1.NodeConf
obj, ok, err := store.GetByKey(nodeName)
if err != nil {
err := fmt.Errorf("failed to retrieve Node %q from informer's store, error: %v", nodeName, err)
-utillog.Errorf(err.Error())
+klog.ErrorS(err, "Kubelet config controller")
return nil, err
} else if !ok {
err := fmt.Errorf("node %q does not exist in the informer's store, can't sync config source", nodeName)
-utillog.Errorf(err.Error())
+klog.ErrorS(err, "Kubelet config controller")
return nil, err
}
node, ok := obj.(*apiv1.Node)
if !ok {
err := fmt.Errorf("failed to cast object from informer's store to Node, can't sync config source for Node %q", nodeName)
-utillog.Errorf(err.Error())
+klog.ErrorS(err, "Kubelet config controller")
return nil, err
}
// Copy the source, so anyone who modifies it after here doesn't mess up the informer's store!


@@ -18,6 +18,7 @@ package kubeletconfig
import (
"fmt"
+"k8s.io/klog/v2"
"path/filepath"
"time"
@@ -32,7 +33,6 @@
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
utilpanic "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
@@ -98,7 +98,7 @@ func NewController(dynamicConfigDir string, transform TransformFunc) *Controller
// or returns an error if no valid configuration could be produced. Bootstrap should be called synchronously before StartSync.
// If the pre-existing local configuration should be used, Bootstrap returns a nil config.
func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
utillog.Infof("starting controller")
klog.InfoS("Kubelet config controller starting controller")
// ensure the filesystem is initialized
if err := cc.initializeDynamicConfigDir(); err != nil {
@@ -148,7 +148,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
// or something else scary
// log the reason and error details for the failure to load the assigned config
-utillog.Errorf(fmt.Sprintf("%s, error: %v", reason, err))
+klog.ErrorS(err, "Kubelet config controller", "reason", reason)
// set status to indicate the failure with the assigned config
cc.configStatus.SetError(reason)
@@ -194,7 +194,7 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
// status sync worker
statusSyncLoopFunc := utilpanic.HandlePanic(func() {
utillog.Infof("starting status sync loop")
klog.InfoS("Kubelet config controller starting status sync loop")
wait.JitterUntil(func() {
cc.configStatus.Sync(client, nodeName)
}, 10*time.Second, 0.2, true, wait.NeverStop)
@@ -204,7 +204,7 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
if err != nil {
return fmt.Errorf(errFmt, err)
} else if assignedSource == nil {
utillog.Infof("local source is assigned, will not start remote config source informer")
klog.InfoS("Kubelet config controller local source is assigned, will not start remote config source informer")
} else {
cc.remoteConfigSourceInformer = assignedSource.Informer(client, cache.ResourceEventHandlerFuncs{
AddFunc: cc.onAddRemoteConfigSourceEvent,
@@ -215,7 +215,7 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
}
remoteConfigSourceInformerFunc := utilpanic.HandlePanic(func() {
if cc.remoteConfigSourceInformer != nil {
utillog.Infof("starting remote config source informer")
klog.InfoS("Kubelet config controller starting remote config source informer")
cc.remoteConfigSourceInformer.Run(wait.NeverStop)
}
})
@@ -223,12 +223,12 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
cc.nodeInformer = newSharedNodeInformer(client, nodeName,
cc.onAddNodeEvent, cc.onUpdateNodeEvent, cc.onDeleteNodeEvent)
nodeInformerFunc := utilpanic.HandlePanic(func() {
utillog.Infof("starting Node informer")
klog.InfoS("Kubelet config controller starting Node informer")
cc.nodeInformer.Run(wait.NeverStop)
})
// config sync worker
configSyncLoopFunc := utilpanic.HandlePanic(func() {
utillog.Infof("starting Kubelet config sync loop")
klog.InfoS("Kubelet config controller starting Kubelet config sync loop")
wait.JitterUntil(func() {
cc.syncConfigSource(client, eventClient, nodeName)
}, 10*time.Second, 0.2, true, wait.NeverStop)
@@ -264,7 +264,7 @@ func (cc *Controller) loadConfig(source checkpoint.RemoteConfigSource) (*kubelet
// initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly
func (cc *Controller) initializeDynamicConfigDir() error {
utillog.Infof("ensuring filesystem is set up correctly")
klog.InfoS("Kubelet config controller ensuring filesystem is set up correctly")
// initializeDynamicConfigDir local checkpoint storage location
return cc.checkpointStore.Initialize()
}
@@ -273,10 +273,10 @@
func (cc *Controller) checkTrial(duration time.Duration) {
// when the trial period is over, the assigned config becomes the last-known-good
if trial, err := cc.inTrial(duration); err != nil {
utillog.Errorf("failed to check trial period for assigned config, error: %v", err)
klog.ErrorS(err, "Kubelet config controller failed to check trial period for assigned config")
} else if !trial {
if err := cc.graduateAssignedToLastKnownGood(); err != nil {
utillog.Errorf("failed to set last-known-good to assigned config, error: %v", err)
klog.ErrorS(err, "failed to set last-known-good to assigned config")
}
}
}
@@ -319,6 +319,6 @@ func (cc *Controller) graduateAssignedToLastKnownGood() error {
}
// update the status to reflect the new last-known-good config
cc.configStatus.SetLastKnownGood(assigned.NodeConfigSource())
utillog.Infof("updated last-known-good config to %s, UID: %s, ResourceVersion: %s", assigned.APIPath(), assigned.UID(), assigned.ResourceVersion())
klog.InfoS("Kubelet config controller updated last-known-good config to new checkpointStore", "apiPath", assigned.APIPath(), "checkpointUID", assigned.UID(), "resourceVersion", assigned.ResourceVersion())
return nil
}


@@ -19,13 +19,13 @@ package status
import (
"context"
"fmt"
+"k8s.io/klog/v2"
"sync"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
"k8s.io/kubernetes/pkg/kubelet/metrics"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
@@ -143,7 +143,7 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) {
return
}
utillog.Infof("updating Node.Status.Config")
klog.InfoS("Kubelet config controller updating Node.Status.Config")
// grab the lock
s.mux.Lock()
@@ -153,7 +153,7 @@
var err error
defer func() {
if err != nil {
-utillog.Errorf(err.Error())
+klog.ErrorS(err, "Kubelet config controller")
s.sync()
}
}()
@@ -198,6 +198,6 @@
// patch the node with the new status
if _, _, err := nodeutil.PatchNodeStatus(client.CoreV1(), types.NodeName(nodeName), oldNode, newNode); err != nil {
utillog.Errorf("failed to patch node status, error: %v", err)
klog.ErrorS(err, "Kubelet config controller failed to patch node status")
}
}


@@ -115,7 +115,7 @@ func DecodeKubeletConfiguration(kubeletCodecs *serializer.CodecFactory, data []b
return nil, fmt.Errorf("failed lenient decoding: %v", err)
}
// Continue with the v1beta1 object that was decoded leniently, but emit a warning.
klog.Warningf("using lenient decoding as strict decoding failed: %v", err)
klog.InfoS("Using lenient decoding as strict decoding failed", "err", err)
}
internalKC, ok := obj.(*kubeletconfig.KubeletConfiguration)
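
Note that the Warningf call above becomes klog.InfoS with the error attached under an "err" key rather than klog.ErrorS: the lenient decode succeeded, so the condition is informational, and the structured API has no WarningS (the Kubernetes migration convention maps warnings to InfoS). The distinction, using calls from this commit:

    // non-fatal condition: info-level line, error carried as a key/value pair
    klog.InfoS("Using lenient decoding as strict decoding failed", "err", err)
    // genuine failure: error-level line, error is the first argument
    klog.ErrorS(err, "Kubelet config controller failed to send event")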


@@ -1,49 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package log
-import (
-"fmt"
-"k8s.io/klog/v2"
-)
-const logFmt = "kubelet config controller: %s"
-// Errorf shim that inserts "kubelet config controller" at the beginning of the log message,
-// while still reporting the call site of the logging function.
-func Errorf(format string, args ...interface{}) {
-var s string
-if len(args) > 0 {
-s = fmt.Sprintf(format, args...)
-} else {
-s = format
-}
-klog.ErrorDepth(1, fmt.Sprintf(logFmt, s))
-}
-// Infof shim that inserts "kubelet config controller" at the beginning of the log message,
-// while still reporting the call site of the logging function.
-func Infof(format string, args ...interface{}) {
-var s string
-if len(args) > 0 {
-s = fmt.Sprintf(format, args...)
-} else {
-s = format
-}
-klog.InfoDepth(1, fmt.Sprintf(logFmt, s))
-}
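
The deleted shim existed only to prepend "kubelet config controller: " to every message while using InfoDepth/ErrorDepth so klog still attributed the line to the real call site. With direct klog.InfoS / klog.ErrorS calls, call-site attribution is automatic, so the component name simply moves into each constant message. The replacement at a typical call site, taken from the sync hunks above:

    // old shim call: prefix added by the shim, call site preserved via ErrorDepth(1)
    utillog.Errorf("failed to send event, error: %v", err)
    // new direct call: prefix folded into the constant message
    klog.ErrorS(err, "Kubelet config controller failed to send event")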


@@ -17,6 +17,7 @@ limitations under the License.
package kubeletconfig
import (
+"k8s.io/klog/v2"
"math/rand"
"time"
@@ -26,7 +27,6 @@
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
-utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
)
// newSharedNodeInformer returns a shared informer that uses `client` to watch the Node with
@@ -67,22 +67,22 @@
func (cc *Controller) onUpdateNodeEvent(oldObj interface{}, newObj interface{}) {
newNode, ok := newObj.(*apiv1.Node)
if !ok {
utillog.Errorf("failed to cast new object to Node, couldn't handle event")
klog.ErrorS(nil, "Kubelet config controller failed to cast new object to Node, couldn't handle event")
return
}
if oldObj == nil {
// Node was just added, need to sync
utillog.Infof("initial Node watch event")
klog.InfoS("Kubelet config controller initial Node watch event")
cc.pokeConfigSourceWorker()
return
}
oldNode, ok := oldObj.(*apiv1.Node)
if !ok {
utillog.Errorf("failed to cast old object to Node, couldn't handle event")
klog.ErrorS(nil, "Kubelet config controller failed to cast old object to Node, couldn't handle event")
return
}
if !apiequality.Semantic.DeepEqual(oldNode.Spec.ConfigSource, newNode.Spec.ConfigSource) {
utillog.Infof("Node.Spec.ConfigSource was updated")
klog.InfoS("Kubelet config controller Node.Spec.ConfigSource was updated")
cc.pokeConfigSourceWorker()
}
}
@@ -96,7 +96,7 @@ func (cc *Controller) onDeleteNodeEvent(deletedObj interface{}) {
// For this case, we just log the event.
// We don't want to poke the worker, because a temporary deletion isn't worth reporting an error for.
// If the Node is deleted because the VM is being deleted, then the Kubelet has nothing to do.
utillog.Infof("Node was deleted")
klog.InfoS("Kubelet config controller Node was deleted")
}
// onAddRemoteConfigSourceEvent calls onUpdateConfigMapEvent with the new object and a nil old object
@@ -110,22 +110,22 @@ func (cc *Controller) onUpdateRemoteConfigSourceEvent(oldObj interface{}, newObj
// since ConfigMap is currently the only source type, we handle that here
newConfigMap, ok := newObj.(*apiv1.ConfigMap)
if !ok {
utillog.Errorf("failed to cast new object to ConfigMap, couldn't handle event")
klog.ErrorS(nil, "Kubelet config controller failed to cast new object to ConfigMap, couldn't handle event")
return
}
if oldObj == nil {
// ConfigMap was just added, need to sync
utillog.Infof("initial ConfigMap watch event")
klog.InfoS("Kubelet config controller initial ConfigMap watch event")
cc.pokeConfigSourceWorker()
return
}
oldConfigMap, ok := oldObj.(*apiv1.ConfigMap)
if !ok {
utillog.Errorf("failed to cast old object to ConfigMap, couldn't handle event")
klog.ErrorS(nil, "Kubelet config controller failed to cast old object to ConfigMap, couldn't handle event")
return
}
if !apiequality.Semantic.DeepEqual(oldConfigMap, newConfigMap) {
utillog.Infof("assigned ConfigMap was updated")
klog.InfoS("Kubelet config controller assigned ConfigMap was updated")
cc.pokeConfigSourceWorker()
}
}
@@ -135,6 +135,6 @@ func (cc *Controller) onDeleteRemoteConfigSourceEvent(deletedObj interface{}) {
// If the ConfigMap we're watching is deleted, we log the event and poke the sync worker.
// This requires a sync, because if the Node is still configured to use the deleted ConfigMap,
// the Kubelet should report a DownloadError.
utillog.Infof("assigned ConfigMap was deleted")
klog.InfoS("Kubelet config controller assigned ConfigMap was deleted")
cc.pokeConfigSourceWorker()
}
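
One last pattern from the watch handlers above: a failed type assertion yields no error value, so the migration passes nil as the first argument to klog.ErrorS to keep the line at error severity (klog/v2 omits the err field when the error is nil). In context:

    newNode, ok := newObj.(*apiv1.Node)
    if !ok {
        // no underlying error object exists for a failed type assertion;
        // nil keeps error severity without fabricating an error value
        klog.ErrorS(nil, "Kubelet config controller failed to cast new object to Node, couldn't handle event")
        return
    }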