From a10cd79f633cafdf9114a73f5cf3298fb24fa07d Mon Sep 17 00:00:00 2001 From: Gunju Kim Date: Wed, 8 May 2024 22:40:22 +0900 Subject: [PATCH] Remove deprecated sets.String from pkg/kubelet - s/sets.String/sets.Set[string]/g - s/sets.NewString/sets.New[string]/g - Use sets.List instead of (sets.String).List --- pkg/kubelet/apis/config/helpers_test.go | 30 +++++++++---------- pkg/kubelet/cm/devicemanager/manager_test.go | 4 +-- pkg/kubelet/config/config.go | 16 +++++----- pkg/kubelet/config/config_test.go | 2 +- pkg/kubelet/config/sources.go | 6 ++-- pkg/kubelet/configmap/configmap_manager.go | 4 +-- pkg/kubelet/container/helpers.go | 4 +-- pkg/kubelet/images/image_gc_manager.go | 8 ++--- pkg/kubelet/kubelet.go | 2 +- pkg/kubelet/kubelet_node_status.go | 2 +- pkg/kubelet/kubelet_pods.go | 2 +- pkg/kubelet/kubelet_test.go | 4 +-- pkg/kubelet/kubelet_volumes.go | 2 +- .../kuberuntime/kuberuntime_container.go | 4 +-- pkg/kubelet/kuberuntime/kuberuntime_gc.go | 2 +- .../kuberuntime/kuberuntime_image_test.go | 6 ++-- .../kuberuntime/kuberuntime_manager_test.go | 6 ++-- .../metrics/collectors/volume_stats.go | 2 +- pkg/kubelet/network/dns/dns_test.go | 6 ++-- pkg/kubelet/pleg/generic.go | 2 +- .../pluginmanager/plugin_manager_test.go | 2 +- pkg/kubelet/pod/testing/fake_mirror_client.go | 8 ++--- pkg/kubelet/pod_workers_test.go | 20 ++++++------- pkg/kubelet/secret/secret_manager.go | 4 +-- pkg/kubelet/server/server.go | 8 ++--- pkg/kubelet/userns/userns_manager.go | 4 +-- .../util/manager/cache_based_manager.go | 4 +-- .../util/manager/cache_based_manager_test.go | 4 +-- .../util/manager/watch_based_manager.go | 2 +- pkg/kubelet/util/queue/work_queue_test.go | 6 ++-- .../cache/desired_state_of_world.go | 8 ++--- .../reconciler/reconstruct_test.go | 6 ++-- pkg/kubelet/volumemanager/volume_manager.go | 12 ++++---- .../volumemanager/volume_manager_test.go | 2 +- pkg/kubelet/winstats/network_stats.go | 2 +- 35 files changed, 103 insertions(+), 103 deletions(-) diff --git 
a/pkg/kubelet/apis/config/helpers_test.go b/pkg/kubelet/apis/config/helpers_test.go index ab6907cf9a0..a58852a40a3 100644 --- a/pkg/kubelet/apis/config/helpers_test.go +++ b/pkg/kubelet/apis/config/helpers_test.go @@ -30,11 +30,11 @@ func TestKubeletConfigurationPathFields(t *testing.T) { if i := kubeletConfigurationPathFieldPaths.Intersection(kubeletConfigurationNonPathFieldPaths); len(i) > 0 { t.Fatalf("expect the intersection of kubeletConfigurationPathFieldPaths and "+ "KubeletConfigurationNonPathFields to be empty, got:\n%s", - strings.Join(i.List(), "\n")) + strings.Join(sets.List(i), "\n")) } // ensure that kubeletConfigurationPathFields U kubeletConfigurationNonPathFields == allPrimitiveFieldPaths(KubeletConfiguration) - expect := sets.NewString().Union(kubeletConfigurationPathFieldPaths).Union(kubeletConfigurationNonPathFieldPaths) + expect := sets.New[string]().Union(kubeletConfigurationPathFieldPaths).Union(kubeletConfigurationNonPathFieldPaths) result := allPrimitiveFieldPaths(t, expect, reflect.TypeOf(&KubeletConfiguration{}), nil) if !expect.Equal(result) { // expected fields missing from result @@ -46,38 +46,38 @@ func TestKubeletConfigurationPathFields(t *testing.T) { "If the field has been removed, please remove it from the kubeletConfigurationPathFieldPaths set "+ "and the KubeletConfigurationPathRefs function, "+ "or remove it from the kubeletConfigurationNonPathFieldPaths set, as appropriate:\n%s", - strings.Join(missing.List(), "\n")) + strings.Join(sets.List(missing), "\n")) } if len(unexpected) > 0 { t.Errorf("the following fields were in the result, but unexpected. 
"+ "If the field is new, please add it to the kubeletConfigurationPathFieldPaths set "+ "and the KubeletConfigurationPathRefs function, "+ "or add it to the kubeletConfigurationNonPathFieldPaths set, as appropriate:\n%s", - strings.Join(unexpected.List(), "\n")) + strings.Join(sets.List(unexpected), "\n")) } } } // allPrimitiveFieldPaths returns the set of field paths in type `tp`, rooted at `path`. // It recursively descends into the definition of type `tp` accumulating paths to primitive leaf fields or paths in `skipRecurseList`. -func allPrimitiveFieldPaths(t *testing.T, skipRecurseList sets.String, tp reflect.Type, path *field.Path) sets.String { +func allPrimitiveFieldPaths(t *testing.T, skipRecurseList sets.Set[string], tp reflect.Type, path *field.Path) sets.Set[string] { // if the current field path is in the list of paths we should not recurse into, // return here rather than descending and accumulating child field paths if pathStr := path.String(); len(pathStr) > 0 && skipRecurseList.Has(pathStr) { - return sets.NewString(pathStr) + return sets.New[string](pathStr) } - paths := sets.NewString() + paths := sets.New[string]() switch tp.Kind() { case reflect.Pointer: - paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path).List()...) + paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path))...) case reflect.Struct: for i := 0; i < tp.NumField(); i++ { field := tp.Field(i) - paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, field.Type, path.Child(field.Name)).List()...) + paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, field.Type, path.Child(field.Name)))...) } case reflect.Map, reflect.Slice: - paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path.Key("*")).List()...) + paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path.Key("*")))...) 
case reflect.Interface: t.Fatalf("unexpected interface{} field %s", path.String()) default: @@ -115,7 +115,7 @@ type bar struct { } func TestAllPrimitiveFieldPaths(t *testing.T) { - expect := sets.NewString( + expect := sets.New[string]( "str", "strptr", "ints[*]", @@ -140,17 +140,17 @@ func TestAllPrimitiveFieldPaths(t *testing.T) { unexpected := result.Difference(expect) if len(missing) > 0 { - t.Errorf("the following fields were expected, but missing from the result:\n%s", strings.Join(missing.List(), "\n")) + t.Errorf("the following fields were expected, but missing from the result:\n%s", strings.Join(sets.List(missing), "\n")) } if len(unexpected) > 0 { - t.Errorf("the following fields were in the result, but unexpected:\n%s", strings.Join(unexpected.List(), "\n")) + t.Errorf("the following fields were in the result, but unexpected:\n%s", strings.Join(sets.List(unexpected), "\n")) } } } var ( // KubeletConfiguration fields that contain file paths. If you update this, also update KubeletConfigurationPathRefs! - kubeletConfigurationPathFieldPaths = sets.NewString( + kubeletConfigurationPathFieldPaths = sets.New[string]( "StaticPodPath", "Authentication.X509.ClientCAFile", "TLSCertFile", @@ -160,7 +160,7 @@ var ( ) // KubeletConfiguration fields that do not contain file paths. 
- kubeletConfigurationNonPathFieldPaths = sets.NewString( + kubeletConfigurationNonPathFieldPaths = sets.New[string]( "Address", "AllowedUnsafeSysctls[*]", "Authentication.Anonymous.Enabled", diff --git a/pkg/kubelet/cm/devicemanager/manager_test.go b/pkg/kubelet/cm/devicemanager/manager_test.go index 6b5ec623c28..5f9efd37f3d 100644 --- a/pkg/kubelet/cm/devicemanager/manager_test.go +++ b/pkg/kubelet/cm/devicemanager/manager_test.go @@ -312,8 +312,7 @@ func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) plugin } func runPluginManager(pluginManager pluginmanager.PluginManager) { - // FIXME: Replace sets.String with sets.Set[string] - sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true }) + sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true }) go pluginManager.Run(sourcesReady, wait.NeverStop) } diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index f6e31189f0e..34fde1a1038 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -67,7 +67,7 @@ type PodConfig struct { // contains the list of all configured sources sourcesLock sync.Mutex - sources sets.String + sources sets.Set[string] } // NewPodConfig creates an object that can merge many configuration sources into a stream @@ -79,7 +79,7 @@ func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder, pods: storage, mux: newMux(storage), updates: updates, - sources: sets.String{}, + sources: sets.Set[string]{}, } return podConfig } @@ -95,14 +95,14 @@ func (c *PodConfig) Channel(ctx context.Context, source string) chan<- interface // SeenAllSources returns true if seenSources contains all sources in the // config, and also this config has received a SET message from each source. 
-func (c *PodConfig) SeenAllSources(seenSources sets.String) bool { +func (c *PodConfig) SeenAllSources(seenSources sets.Set[string]) bool { if c.pods == nil { return false } c.sourcesLock.Lock() defer c.sourcesLock.Unlock() - klog.V(5).InfoS("Looking for sources, have seen", "sources", c.sources.List(), "seenSources", seenSources) - return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...) + klog.V(5).InfoS("Looking for sources, have seen", "sources", sets.List(c.sources), "seenSources", seenSources) + return seenSources.HasAll(sets.List(c.sources)...) && c.pods.seenSources(sets.List(c.sources)...) } // Updates returns a channel of updates to the configuration, properly denormalized. @@ -132,7 +132,7 @@ type podStorage struct { // contains the set of all sources that have sent at least one SET sourcesSeenLock sync.RWMutex - sourcesSeen sets.String + sourcesSeen sets.Set[string] // the EventRecorder to use recorder record.EventRecorder @@ -148,7 +148,7 @@ func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificatio pods: make(map[string]map[types.UID]*v1.Pod), mode: mode, updates: updates, - sourcesSeen: sets.String{}, + sourcesSeen: sets.Set[string]{}, recorder: recorder, startupSLIObserver: startupSLIObserver, } @@ -331,7 +331,7 @@ func (s *podStorage) seenSources(sources ...string) bool { } func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) { - names := sets.String{} + names := sets.Set[string]{} for i, pod := range pods { // Pods from each source are assumed to have passed validation individually. // This function only checks if there is any naming conflict. 
diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index 0049cfd464e..b2e652c6c2e 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -455,7 +455,7 @@ func TestPodConfigRace(t *testing.T) { eventBroadcaster := record.NewBroadcaster(record.WithContext(tCtx)) config := NewPodConfig(PodConfigNotificationIncremental, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"}), &mockPodStartupSLIObserver{}) - seenSources := sets.NewString(TestSource) + seenSources := sets.New[string](TestSource) var wg sync.WaitGroup const iterations = 100 wg.Add(2) diff --git a/pkg/kubelet/config/sources.go b/pkg/kubelet/config/sources.go index a8ac11eb04f..f1ce89adde7 100644 --- a/pkg/kubelet/config/sources.go +++ b/pkg/kubelet/config/sources.go @@ -24,7 +24,7 @@ import ( ) // SourcesReadyFn is function that returns true if the specified sources have been seen. -type SourcesReadyFn func(sourcesSeen sets.String) bool +type SourcesReadyFn func(sourcesSeen sets.Set[string]) bool // SourcesReady tracks the set of configured sources seen by the kubelet. type SourcesReady interface { @@ -37,7 +37,7 @@ type SourcesReady interface { // NewSourcesReady returns a SourcesReady with the specified function. func NewSourcesReady(sourcesReadyFn SourcesReadyFn) SourcesReady { return &sourcesImpl{ - sourcesSeen: sets.NewString(), + sourcesSeen: sets.New[string](), sourcesReadyFn: sourcesReadyFn, } } @@ -47,7 +47,7 @@ type sourcesImpl struct { // lock protects access to sources seen. lock sync.RWMutex // set of sources seen. - sourcesSeen sets.String + sourcesSeen sets.Set[string] // sourcesReady is a function that evaluates if the sources are ready. 
sourcesReadyFn SourcesReadyFn } diff --git a/pkg/kubelet/configmap/configmap_manager.go b/pkg/kubelet/configmap/configmap_manager.go index 0f276c083ec..08bc9674668 100644 --- a/pkg/kubelet/configmap/configmap_manager.go +++ b/pkg/kubelet/configmap/configmap_manager.go @@ -98,8 +98,8 @@ func (c *configMapManager) UnregisterPod(pod *v1.Pod) { c.manager.UnregisterPod(pod) } -func getConfigMapNames(pod *v1.Pod) sets.String { - result := sets.NewString() +func getConfigMapNames(pod *v1.Pod) sets.Set[string] { + result := sets.New[string]() podutil.VisitPodConfigmapNames(pod, func(name string) bool { result.Insert(name) return true diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 7f8963ab6a1..e0b8e052829 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -164,7 +164,7 @@ func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVa func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) { envmap := envVarsToMap(envs) - missingKeys := sets.NewString() + missingKeys := sets.New[string]() expanded := expansion.Expand(mount.SubPathExpr, func(key string) string { value, ok := envmap[key] if !ok || len(value) == 0 { @@ -174,7 +174,7 @@ func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, e }) if len(missingKeys) > 0 { - return "", fmt.Errorf("missing value for %s", strings.Join(missingKeys.List(), ", ")) + return "", fmt.Errorf("missing value for %s", strings.Join(sets.List(missingKeys), ", ")) } return expanded, nil } diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index bb8a7954002..e18618d95a4 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -232,9 +232,9 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) { return im.imageCache.get(), nil } -func (im *realImageGCManager) detectImages(ctx context.Context, 
detectTime time.Time) (sets.String, error) { +func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.Set[string], error) { isRuntimeClassInImageCriAPIEnabled := utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) - imagesInUse := sets.NewString() + imagesInUse := sets.New[string]() images, err := im.runtime.ListImages(ctx) if err != nil { @@ -261,7 +261,7 @@ func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time. // Add new images and record those being used. now := time.Now() - currentImages := sets.NewString() + currentImages := sets.New[string]() im.imageRecordsLock.Lock() defer im.imageRecordsLock.Unlock() for _, image := range images { @@ -554,7 +554,7 @@ func (ev byLastUsedAndDetected) Less(i, j int) bool { return ev[i].lastUsed.Before(ev[j].lastUsed) } -func isImageUsed(imageID string, imagesInUse sets.String) bool { +func isImageUsed(imageID string, imagesInUse sets.Set[string]) bool { // Check the image ID. 
if _, ok := imagesInUse[imageID]; ok { return true diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index b9530aefa43..3c12edc4859 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -2230,7 +2230,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus func (kl *Kubelet) getPodsToSync() []*v1.Pod { allPods := kl.podManager.GetPods() podUIDs := kl.workQueue.GetWork() - podUIDSet := sets.NewString() + podUIDSet := sets.New[string]() for _, podUID := range podUIDs { podUIDSet.Insert(string(podUID)) } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 140420d6361..ca5e732ea38 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -130,7 +130,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { // reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool { requiresUpdate := updateDefaultResources(initialNode, existingNode) - supportedHugePageResources := sets.String{} + supportedHugePageResources := sets.Set[string]{} for resourceName := range initialNode.Status.Capacity { if !v1helper.IsHugePageResourceName(resourceName) { diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 184ea654092..aa38412142b 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -631,7 +631,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, return opts, cleanupAction, nil } -var masterServices = sets.NewString("kubernetes") +var masterServices = sets.New[string]("kubernetes") // getServiceEnvVarMap makes a map[string]string of env vars for services a // pod in namespace ns should see. 
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 7f6b1ed71f0..a703f9babb5 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -209,7 +209,7 @@ func newTestKubeletWithImageList( kubelet.runtimeState.setNetworkState(nil) kubelet.rootDirectory = t.TempDir() kubelet.podLogsDirectory = t.TempDir() - kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true }) + kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return true }) kubelet.serviceLister = testServiceLister{} kubelet.serviceHasSynced = func() bool { return true } kubelet.nodeHasSynced = func() bool { return true } @@ -728,7 +728,7 @@ func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) { {Pod: fakePod}, } kubelet := testKubelet.kubelet - kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready }) + kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return ready }) kubelet.HandlePodRemoves(pods) time.Sleep(2 * time.Second) diff --git a/pkg/kubelet/kubelet_volumes.go b/pkg/kubelet/kubelet_volumes.go index 27c82ae430e..72635fcfe2e 100644 --- a/pkg/kubelet/kubelet_volumes.go +++ b/pkg/kubelet/kubelet_volumes.go @@ -169,7 +169,7 @@ func (kl *Kubelet) removeOrphanedPodVolumeDirs(uid types.UID) []error { // cleanupOrphanedPodDirs removes the volumes of pods that should not be // running and that have no containers running. Note that we roll up logs here since it runs in the main loop. 
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error { - allPods := sets.NewString() + allPods := sets.New[string]() for _, pod := range pods { allPods.Insert(string(pod.UID)) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 0267b37cc7b..fce3571141f 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -846,7 +846,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) { // only the last execution of each init container should be preserved, and only preserve it if it is in the // list of init containers to keep. - initContainerNames := sets.NewString() + initContainerNames := sets.New[string]() for _, container := range pod.Spec.InitContainers { initContainerNames.Insert(container.Name) } @@ -880,7 +880,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C // of the container because it assumes all init containers have been stopped // before the call happens. 
func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) { - initContainerNames := sets.NewString() + initContainerNames := sets.New[string]() for _, container := range pod.Spec.InitContainers { initContainerNames.Insert(container.Name) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index c6f8856213e..6189b1f07ca 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -288,7 +288,7 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods } // collect all the PodSandboxId of container - sandboxIDs := sets.NewString() + sandboxIDs := sets.New[string]() for _, container := range containers { sandboxIDs.Insert(container.PodSandboxId) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go index 4bdac2ff429..4596ad469b6 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go @@ -88,17 +88,17 @@ func TestListImages(t *testing.T) { assert.NoError(t, err) images := []string{"1111", "2222", "3333"} - expected := sets.NewString(images...) + expected := sets.New[string](images...) 
fakeImageService.SetFakeImages(images) actualImages, err := fakeManager.ListImages(ctx) assert.NoError(t, err) - actual := sets.NewString() + actual := sets.New[string]() for _, i := range actualImages { actual.Insert(i.ID) } - assert.Equal(t, expected.List(), actual.List()) + assert.Equal(t, sets.List(expected), sets.List(actual)) } func TestListImagesPinnedField(t *testing.T) { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 14ff9118690..d512b45b727 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -246,8 +246,8 @@ func verifyPods(a, b []*kubecontainer.Pod) bool { return reflect.DeepEqual(a, b) } -func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) { - actual := sets.NewString() +func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.Set[string]) (sets.Set[string], bool) { + actual := sets.New[string]() for _, c := range fakeRuntime.Containers { actual.Insert(c.Id) } @@ -741,7 +741,7 @@ func TestPruneInitContainers(t *testing.T) { assert.NoError(t, err) m.pruneInitContainersBeforeStart(ctx, pod, podStatus) - expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id) + expectedContainers := sets.New[string](fakes[0].Id, fakes[2].Id) if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok { t.Errorf("expected %v, got %v", expectedContainers, actual) } diff --git a/pkg/kubelet/metrics/collectors/volume_stats.go b/pkg/kubelet/metrics/collectors/volume_stats.go index ddcb308de01..b565bf1d376 100644 --- a/pkg/kubelet/metrics/collectors/volume_stats.go +++ b/pkg/kubelet/metrics/collectors/volume_stats.go @@ -107,7 +107,7 @@ func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Me lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...) 
ch <- metrics.NewLazyConstMetric(desc, metrics.GaugeValue, v, lv...) } - allPVCs := sets.String{} + allPVCs := sets.Set[string]{} for _, podStat := range podStats { if podStat.VolumeStats == nil { continue diff --git a/pkg/kubelet/network/dns/dns_test.go b/pkg/kubelet/network/dns/dns_test.go index 1cf9e20fa0f..a6b7ec498bb 100644 --- a/pkg/kubelet/network/dns/dns_test.go +++ b/pkg/kubelet/network/dns/dns_test.go @@ -104,7 +104,7 @@ func TestParseResolvConf(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i) assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i) - assert.EqualValues(t, sets.NewString(tc.options...), sets.NewString(opts...), "test case [%d] options", i) + assert.EqualValues(t, sets.New[string](tc.options...), sets.New[string](opts...), "test case [%d] options", i) } else { require.Error(t, err, "tc.searches %v", tc.searches) } @@ -334,7 +334,7 @@ func TestMergeDNSOptions(t *testing.T) { for _, tc := range testCases { options := mergeDNSOptions(tc.existingDNSConfigOptions, tc.dnsConfigOptions) // Options order may be changed after conversion. - if !sets.NewString(options...).Equal(sets.NewString(tc.expectedOptions...)) { + if !sets.New[string](options...).Equal(sets.New[string](tc.expectedOptions...)) { t.Errorf("%s: mergeDNSOptions(%v, %v)=%v, want %v", tc.desc, tc.existingDNSConfigOptions, tc.dnsConfigOptions, options, tc.expectedOptions) } } @@ -697,7 +697,7 @@ func dnsConfigsAreEqual(resConfig, expectedConfig *runtimeapi.DNSConfig) bool { } } // Options order may be changed after conversion. 
- return sets.NewString(resConfig.Options...).Equal(sets.NewString(expectedConfig.Options...)) + return sets.New[string](resConfig.Options...).Equal(sets.New[string](expectedConfig.Options...)) } func newTestPods(count int) []*v1.Pod { diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index 80da5bb059c..a020101920c 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -361,7 +361,7 @@ func (g *GenericPLEG) Relist() { } func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Container { - cidSet := sets.NewString() + cidSet := sets.New[string]() var containers []*kubecontainer.Container fillCidSet := func(cs []*kubecontainer.Container) { for _, c := range cs { diff --git a/pkg/kubelet/pluginmanager/plugin_manager_test.go b/pkg/kubelet/pluginmanager/plugin_manager_test.go index 2790822446e..407f821977e 100644 --- a/pkg/kubelet/pluginmanager/plugin_manager_test.go +++ b/pkg/kubelet/pluginmanager/plugin_manager_test.go @@ -131,7 +131,7 @@ func TestPluginRegistration(t *testing.T) { stopChan := make(chan struct{}) defer close(stopChan) go func() { - sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true }) + sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true }) pluginManager.Run(sourcesReady, stopChan) }() diff --git a/pkg/kubelet/pod/testing/fake_mirror_client.go b/pkg/kubelet/pod/testing/fake_mirror_client.go index 5bd9f755ab9..437a2f2a787 100644 --- a/pkg/kubelet/pod/testing/fake_mirror_client.go +++ b/pkg/kubelet/pod/testing/fake_mirror_client.go @@ -19,7 +19,7 @@ package testing import ( "sync" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -29,14 +29,14 @@ type FakeMirrorClient struct { mirrorPodLock sync.RWMutex // Note that a real mirror manager does not store the mirror pods in // itself. 
This fake manager does this to track calls. - mirrorPods sets.String + mirrorPods sets.Set[string] createCounts map[string]int deleteCounts map[string]int } func NewFakeMirrorClient() *FakeMirrorClient { m := FakeMirrorClient{} - m.mirrorPods = sets.NewString() + m.mirrorPods = sets.New[string]() m.createCounts = make(map[string]int) m.deleteCounts = make(map[string]int) return &m @@ -75,7 +75,7 @@ func (fmc *FakeMirrorClient) NumOfPods() int { func (fmc *FakeMirrorClient) GetPods() []string { fmc.mirrorPodLock.RLock() defer fmc.mirrorPodLock.RUnlock() - return fmc.mirrorPods.List() + return sets.List(fmc.mirrorPods) } func (fmc *FakeMirrorClient) GetCounts(podFullName string) (int, int) { diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go index 93e2e601aee..a0eee0b02e7 100644 --- a/pkg/kubelet/pod_workers_test.go +++ b/pkg/kubelet/pod_workers_test.go @@ -220,10 +220,10 @@ func (q *fakeQueue) Items() []FakeQueueItem { return append(make([]FakeQueueItem, 0, len(q.queue)), q.queue...) 
} -func (q *fakeQueue) Set() sets.String { +func (q *fakeQueue) Set() sets.Set[string] { q.lock.Lock() defer q.lock.Unlock() - work := sets.NewString() + work := sets.New[string]() for _, item := range q.queue[q.currentStart:] { work.Insert(string(item.UID)) } @@ -476,7 +476,7 @@ func drainWorkers(podWorkers *podWorkers, numPods int) { } func drainWorkersExcept(podWorkers *podWorkers, uids ...types.UID) { - set := sets.NewString() + set := sets.New[string]() for _, uid := range uids { set.Insert(string(uid)) } @@ -982,8 +982,8 @@ func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) { } } -func newUIDSet(uids ...types.UID) sets.String { - set := sets.NewString() +func newUIDSet(uids ...types.UID) sets.Set[string] { + set := sets.New[string]() for _, uid := range uids { set.Insert(string(uid)) } @@ -993,7 +993,7 @@ func newUIDSet(uids ...types.UID) sets.String { type terminalPhaseSync struct { lock sync.Mutex fn syncPodFnType - terminal sets.String + terminal sets.Set[string] } func (s *terminalPhaseSync) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) { @@ -1018,7 +1018,7 @@ func (s *terminalPhaseSync) SetTerminal(uid types.UID) { func newTerminalPhaseSync(fn syncPodFnType) *terminalPhaseSync { return &terminalPhaseSync{ fn: fn, - terminal: sets.NewString(), + terminal: sets.New[string](), } } @@ -1171,7 +1171,7 @@ func TestStaticPodExclusion(t *testing.T) { t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a)) } // verify all are enqueued - if e, a := sets.NewString("1-normal", "2-static", "4-static", "3-static"), podWorkers.workQueue.(*fakeQueue).Set(); !e.Equal(a) { + if e, a := sets.New[string]("1-normal", "2-static", "4-static", "3-static"), podWorkers.workQueue.(*fakeQueue).Set(); !e.Equal(a) { t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a)) } @@ -1191,7 +1191,7 @@ func TestStaticPodExclusion(t *testing.T) { t.Fatalf("unexpected waiting static 
pods: %s", cmp.Diff(e, a)) } // the queue should include a single item for 3-static (indicating we need to retry later) - if e, a := sets.NewString("3-static"), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) { + if e, a := sets.New[string]("3-static"), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) { t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a)) } @@ -1209,7 +1209,7 @@ func TestStaticPodExclusion(t *testing.T) { t.Fatalf("unexpected pod state: %#v", pod3) } // the queue should be empty because the worker is now done - if e, a := sets.NewString(), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) { + if e, a := sets.New[string](), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) { t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a)) } // 2-static is still running diff --git a/pkg/kubelet/secret/secret_manager.go b/pkg/kubelet/secret/secret_manager.go index bba17fa8fd9..dd6ec256ad6 100644 --- a/pkg/kubelet/secret/secret_manager.go +++ b/pkg/kubelet/secret/secret_manager.go @@ -99,8 +99,8 @@ func (s *secretManager) UnregisterPod(pod *v1.Pod) { s.manager.UnregisterPod(pod) } -func getSecretNames(pod *v1.Pod) sets.String { - result := sets.NewString() +func getSecretNames(pod *v1.Pod) sets.Set[string] { + result := sets.New[string]() podutil.VisitPodSecretNames(pod, func(name string) bool { result.Insert(name) return true diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 0da53926ec5..1c550a18789 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -108,8 +108,8 @@ type Server struct { auth AuthInterface host HostInterface restfulCont containerInterface - metricsBuckets sets.String - metricsMethodBuckets sets.String + metricsBuckets sets.Set[string] + metricsMethodBuckets sets.Set[string] resourceAnalyzer stats.ResourceAnalyzer } @@ -280,8 +280,8 @@ func NewServer( resourceAnalyzer: resourceAnalyzer, auth: auth, 
restfulCont: &filteringContainer{Container: restful.NewContainer()}, - metricsBuckets: sets.NewString(), - metricsMethodBuckets: sets.NewString("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"), + metricsBuckets: sets.New[string](), + metricsMethodBuckets: sets.New[string]("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"), } if auth != nil { server.InstallAuthFilter() diff --git a/pkg/kubelet/userns/userns_manager.go b/pkg/kubelet/userns/userns_manager.go index c431e0511af..41f7094cc4b 100644 --- a/pkg/kubelet/userns/userns_manager.go +++ b/pkg/kubelet/userns/userns_manager.go @@ -472,7 +472,7 @@ func (m *UsernsManager) CleanupOrphanedPodUsernsAllocations(pods []*v1.Pod, runn m.lock.Lock() defer m.lock.Unlock() - allPods := sets.NewString() + allPods := sets.New[string]() for _, pod := range pods { allPods.Insert(string(pod.UID)) } @@ -480,7 +480,7 @@ func (m *UsernsManager) CleanupOrphanedPodUsernsAllocations(pods []*v1.Pod, runn allPods.Insert(string(pod.ID)) } - allFound := sets.NewString() + allFound := sets.New[string]() found, err := m.kl.ListPodsFromDisk() if err != nil { return err diff --git a/pkg/kubelet/util/manager/cache_based_manager.go b/pkg/kubelet/util/manager/cache_based_manager.go index c8f93481251..5f478cf3f57 100644 --- a/pkg/kubelet/util/manager/cache_based_manager.go +++ b/pkg/kubelet/util/manager/cache_based_manager.go @@ -210,7 +210,7 @@ func (s *objectStore) Get(namespace, name string) (runtime.Object, error) { // (e.g. ttl-based implementation vs watch-based implementation). 
type cacheBasedManager struct { objectStore Store - getReferencedObjects func(*v1.Pod) sets.String + getReferencedObjects func(*v1.Pod) sets.Set[string] lock sync.Mutex registeredPods map[objectKey]*v1.Pod @@ -273,7 +273,7 @@ func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) { // - every GetObject() call tries to fetch the value from local cache; if it is // not there, invalidated or too old, we fetch it from apiserver and refresh the // value in cache; otherwise it is just fetched from cache -func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.String) Manager { +func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager { return &cacheBasedManager{ objectStore: objectStore, getReferencedObjects: getReferencedObjects, diff --git a/pkg/kubelet/util/manager/cache_based_manager_test.go b/pkg/kubelet/util/manager/cache_based_manager_test.go index 38bb2439b9a..c4aa96f0bca 100644 --- a/pkg/kubelet/util/manager/cache_based_manager_test.go +++ b/pkg/kubelet/util/manager/cache_based_manager_test.go @@ -73,8 +73,8 @@ func newSecretStore(fakeClient clientset.Interface, clock clock.Clock, getTTL Ge } } -func getSecretNames(pod *v1.Pod) sets.String { - result := sets.NewString() +func getSecretNames(pod *v1.Pod) sets.Set[string] { + result := sets.New[string]() podutil.VisitPodSecretNames(pod, func(name string) bool { result.Insert(name) return true diff --git a/pkg/kubelet/util/manager/watch_based_manager.go b/pkg/kubelet/util/manager/watch_based_manager.go index 91c79176472..cbc42fa6bf1 100644 --- a/pkg/kubelet/util/manager/watch_based_manager.go +++ b/pkg/kubelet/util/manager/watch_based_manager.go @@ -387,7 +387,7 @@ func NewWatchBasedManager( isImmutable isImmutableFunc, groupResource schema.GroupResource, resyncInterval time.Duration, - getReferencedObjects func(*v1.Pod) sets.String) Manager { + getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager { // If a configmap/secret is 
used as a volume, the volumeManager will visit the objectCacheItem every resyncInterval cycle, // We just want to stop the objectCacheItem referenced by environment variables, diff --git a/pkg/kubelet/util/queue/work_queue_test.go b/pkg/kubelet/util/queue/work_queue_test.go index 47f4635438f..2afbbbdb9ae 100644 --- a/pkg/kubelet/util/queue/work_queue_test.go +++ b/pkg/kubelet/util/queue/work_queue_test.go @@ -37,16 +37,16 @@ func newTestBasicWorkQueue() (*basicWorkQueue, *testingclock.FakeClock) { } func compareResults(t *testing.T, expected, actual []types.UID) { - expectedSet := sets.NewString() + expectedSet := sets.New[string]() for _, u := range expected { expectedSet.Insert(string(u)) } - actualSet := sets.NewString() + actualSet := sets.New[string]() for _, u := range actual { actualSet.Insert(string(u)) } if !expectedSet.Equal(actualSet) { - t.Errorf("Expected %#v, got %#v", expectedSet.List(), actualSet.List()) + t.Errorf("Expected %#v, got %#v", sets.List(expectedSet), sets.List(actualSet)) } } diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index d5a4da7f6eb..0702016818b 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -151,7 +151,7 @@ func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr, seLinuxTran return &desiredStateOfWorld{ volumesToMount: make(map[v1.UniqueVolumeName]volumeToMount), volumePluginMgr: volumePluginMgr, - podErrors: make(map[types.UniquePodName]sets.String), + podErrors: make(map[types.UniquePodName]sets.Set[string]), seLinuxTranslator: seLinuxTranslator, } } @@ -166,7 +166,7 @@ type desiredStateOfWorld struct { // plugin objects. volumePluginMgr *volume.VolumePluginMgr // podErrors are errors caught by desiredStateOfWorldPopulator about volumes for a given pod. 
- podErrors map[types.UniquePodName]sets.String + podErrors map[types.UniquePodName]sets.Set[string] // seLinuxTranslator translates v1.SELinuxOptions to a file SELinux label. seLinuxTranslator util.SELinuxLabelTranslator @@ -629,7 +629,7 @@ func (dsw *desiredStateOfWorld) AddErrorToPod(podName types.UniquePodName, err s } return } - dsw.podErrors[podName] = sets.NewString(err) + dsw.podErrors[podName] = sets.New[string](err) } func (dsw *desiredStateOfWorld) PopPodErrors(podName types.UniquePodName) []string { @@ -638,7 +638,7 @@ func (dsw *desiredStateOfWorld) PopPodErrors(podName types.UniquePodName) []stri if errs, found := dsw.podErrors[podName]; found { delete(dsw.podErrors, podName) - return errs.List() + return sets.List(errs) } return []string{} } diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct_test.go b/pkg/kubelet/volumemanager/reconciler/reconstruct_test.go index c5715d2ba2b..c8a05db9a90 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct_test.go @@ -114,12 +114,12 @@ func TestReconstructVolumes(t *testing.T) { t.Errorf("Expected expectedVolumesNeedDevicePath:\n%v\n got:\n%v", expectedVolumes, rcInstance.volumesNeedUpdateFromNodeStatus) } - volumesFailedReconstruction := sets.NewString() + volumesFailedReconstruction := sets.New[string]() for _, vol := range rcInstance.volumesFailedReconstruction { volumesFailedReconstruction.Insert(vol.volumeSpecName) } - if !reflect.DeepEqual(volumesFailedReconstruction.List(), tc.expectedVolumesFailedReconstruction) { - t.Errorf("Expected volumesFailedReconstruction:\n%v\n got:\n%v", tc.expectedVolumesFailedReconstruction, volumesFailedReconstruction.List()) + if !reflect.DeepEqual(sets.List(volumesFailedReconstruction), tc.expectedVolumesFailedReconstruction) { + t.Errorf("Expected volumesFailedReconstruction:\n%v\n got:\n%v", tc.expectedVolumesFailedReconstruction, sets.List(volumesFailedReconstruction)) } if tc.verifyFunc != nil 
{ diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index f8b831665dd..79bb91c2f3a 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -323,7 +323,7 @@ func (vm *volumeManager) GetPossiblyMountedVolumesForPod(podName types.UniquePod func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { podName := util.GetUniquePodName(pod) - supplementalGroups := sets.NewString() + supplementalGroups := sets.New[string]() for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { if mountedVolume.VolumeGidValue != "" { @@ -332,7 +332,7 @@ func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { } result := make([]int64, 0, supplementalGroups.Len()) - for _, group := range supplementalGroups.List() { + for _, group := range sets.List(supplementalGroups) { iGroup, extra := getExtraSupplementalGid(group, pod) if !extra { continue @@ -480,7 +480,7 @@ func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error } func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, expectedVolumes []string) []string { - volumesNotInDSW := sets.NewString(expectedVolumes...) + volumesNotInDSW := sets.New[string](expectedVolumes...) for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() { if volumeToMount.PodName == uniquePodName { @@ -488,7 +488,7 @@ func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, e } } - return volumesNotInDSW.List() + return sets.List(volumesNotInDSW) } // getUnattachedVolumes returns a list of the volumes that are expected to be attached but @@ -534,7 +534,7 @@ func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName) // expectedVolumes. It returns a list of unmounted volumes. // The list also includes volume that may be mounted in uncertain state. 
func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string { - mountedVolumes := sets.NewString() + mountedVolumes := sets.New[string]() for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName) } @@ -543,7 +543,7 @@ func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expect // filterUnmountedVolumes adds each element of expectedVolumes that is not in // mountedVolumes to a list of unmountedVolumes and returns it. -func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string) []string { +func filterUnmountedVolumes(mountedVolumes sets.Set[string], expectedVolumes []string) []string { unmountedVolumes := []string{} for _, expectedVolume := range expectedVolumes { if !mountedVolumes.Has(expectedVolume) { diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index b6736f3a268..bf48450f8fb 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -542,7 +542,7 @@ func runVolumeManager(manager VolumeManager) chan struct{} { stopCh := make(chan struct{}) //readyCh := make(chan bool, 1) //readyCh <- true - sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true }) + sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true }) go manager.Run(sourcesReady, stopCh) return stopCh } diff --git a/pkg/kubelet/winstats/network_stats.go b/pkg/kubelet/winstats/network_stats.go index b584d64381d..eb7f4cd8c53 100644 --- a/pkg/kubelet/winstats/network_stats.go +++ b/pkg/kubelet/winstats/network_stats.go @@ -180,7 +180,7 @@ func (n *networkCounter) mergeCollectedData(packetsReceivedPerSecondData, packetsReceivedErrorsData, packetsOutboundDiscardedData, packetsOutboundErrorsData map[string]uint64) { - adapters := sets.NewString() + adapters := 
sets.New[string]() // merge the collected data and list of adapters. adapters.Insert(n.mergePacketsReceivedPerSecondData(packetsReceivedPerSecondData)...)