diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go
index 5cc7935a941..3291a31ed12 100644
--- a/cmd/kubelet/app/options/options.go
+++ b/cmd/kubelet/app/options/options.go
@@ -169,6 +169,9 @@ type KubeletFlags struct {
     // bootstrapCheckpointPath is the path to the directory containing pod checkpoints to
     // run on restore
     BootstrapCheckpointPath string
+    // NodeStatusMaxImages caps the number of images reported in Node.Status.Images.
+    // This is an experimental, short-term flag to help with node scalability.
+    NodeStatusMaxImages int32

     // DEPRECATED FLAGS
     // minimumGCAge is the minimum age for a finished container before it is
@@ -244,6 +247,8 @@ func NewKubeletFlags() *KubeletFlags {
         CAdvisorPort: 0,
         // TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated
         AllowPrivileged: true,
+        // prior to the introduction of this flag, there was a hardcoded cap of 50 images
+        NodeStatusMaxImages: 50,
     }
 }

@@ -255,6 +260,9 @@ func ValidateKubeletFlags(f *KubeletFlags) error {
     if f.CAdvisorPort != 0 && utilvalidation.IsValidPortNum(int(f.CAdvisorPort)) != nil {
         return fmt.Errorf("invalid configuration: CAdvisorPort (--cadvisor-port) %v must be between 0 and 65535, inclusive", f.CAdvisorPort)
     }
+    if f.NodeStatusMaxImages < -1 {
+        return fmt.Errorf("invalid configuration: NodeStatusMaxImages (--node-status-max-images) must be -1 or greater")
+    }
     return nil
 }

@@ -392,6 +400,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
     fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
     fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, " Directory path for seccomp profiles.")
     fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, " Path to to the directory where the checkpoints are stored")
+    fs.Int32Var(&f.NodeStatusMaxImages, "node-status-max-images", f.NodeStatusMaxImages, " The maximum number of images to report in Node.Status.Images. If -1 is specified, no cap will be applied. Default: 50")

     // DEPRECATED FLAGS
     fs.StringVar(&f.BootstrapKubeconfig, "experimental-bootstrap-kubeconfig", f.BootstrapKubeconfig, "")
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index ca13dc51d30..cfa23f2f7f4 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -958,6 +958,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.
         kubeFlags.NodeLabels,
         kubeFlags.SeccompProfileRoot,
         kubeFlags.BootstrapCheckpointPath,
+        kubeFlags.NodeStatusMaxImages,
         stopCh)
     if err != nil {
         return fmt.Errorf("failed to create kubelet: %v", err)
@@ -1043,6 +1044,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     nodeLabels map[string]string,
     seccompProfileRoot string,
     bootstrapCheckpointPath string,
+    nodeStatusMaxImages int32,
     stopCh <-chan struct{}) (k kubelet.Bootstrap, err error) {
     // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
     // up into "per source" synchronizations
@@ -1077,6 +1079,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
         nodeLabels,
         seccompProfileRoot,
         bootstrapCheckpointPath,
+        nodeStatusMaxImages,
         stopCh)
     if err != nil {
         return nil, err
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 9194c1bbb9c..a5459480eeb 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -219,7 +219,9 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     keepTerminatedPodVolumes bool,
     nodeLabels map[string]string,
     seccompProfileRoot string,
-    bootstrapCheckpointPath string) (Bootstrap, error)
+    bootstrapCheckpointPath string,
+    nodeStatusMaxImages int32,
+    stopCh <-chan struct{}) (Bootstrap, error)

 // Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
@@ -345,6 +347,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     nodeLabels map[string]string,
     seccompProfileRoot string,
     bootstrapCheckpointPath string,
+    nodeStatusMaxImages int32,
     stopCh <-chan struct{}) (*Kubelet, error) {
     if rootDirectory == "" {
         return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
@@ -535,6 +538,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
         iptablesDropBit: int(kubeCfg.IPTablesDropBit),
         experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate),
         keepTerminatedPodVolumes: keepTerminatedPodVolumes,
+        nodeStatusMaxImages: nodeStatusMaxImages,
     }

     if klet.cloud != nil {
@@ -1157,6 +1161,9 @@ type Kubelet struct {
     // such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the
     // directory returned by kubelet.getPluginsDir()
     pluginWatcher pluginwatcher.Watcher
+
+    // This flag sets a maximum number of images to report in the node status.
+    nodeStatusMaxImages int32
 }

 func allGlobalUnicastIPs() ([]net.IP, error) {
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 9a1e6214b6a..47400d3baf5 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -49,9 +49,6 @@ import (
 )

 const (
-    // maxImagesInNodeStatus is the number of max images we store in image status.
-    maxImagesInNodeStatus = 50
-
     // maxNamesPerImageInNodeStatus is max number of names per image stored in
     // the node status.
     maxNamesPerImageInNodeStatus = 5
@@ -721,8 +718,9 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
         return
     }
     // sort the images from max to min, and only set top N images into the node status.
-    if maxImagesInNodeStatus < len(containerImages) {
-        containerImages = containerImages[0:maxImagesInNodeStatus]
+    if int(kl.nodeStatusMaxImages) > -1 &&
+        int(kl.nodeStatusMaxImages) < len(containerImages) {
+        containerImages = containerImages[0:kl.nodeStatusMaxImages]
     }

     for _, image := range containerImages {
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index a9ffe58605e..034560f14a3 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -60,8 +60,8 @@ const (
     maxImageTagsForTest = 20
 )

-// generateTestingImageList generate randomly generated image list and corresponding expectedImageList.
-func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
+// generateTestingImageLists generate randomly generated image list and corresponding expectedImageList.
+func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
     // imageList is randomly generated image list
     var imageList []kubecontainer.Image
     for ; count > 0; count-- {
@@ -73,7 +73,12 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI
         imageList = append(imageList, imageItem)
     }

-    // expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
+    expectedImageList := makeExpectedImageList(imageList, maxImages)
+    return imageList, expectedImageList
+}
+
+func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
+    // expectedImageList is generated by imageList according to size and maxImages
     // 1. sort the imageList by size
     sort.Sort(sliceutils.ByImageSize(imageList))
     // 2. convert sorted imageList to v1.ContainerImage list
@@ -86,8 +91,11 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI
         expectedImageList = append(expectedImageList, apiImage)
     }

-    // 3. only returns the top maxImagesInNodeStatus images in expectedImageList
-    return imageList, expectedImageList[0:maxImagesInNodeStatus]
+    // 3. only returns the top maxImages images in expectedImageList
+    if maxImages == -1 { // -1 means no limit
+        return expectedImageList
+    }
+    return expectedImageList[0:maxImages]
 }

 func generateImageTags() []string {
@@ -299,165 +307,190 @@ func sortNodeAddresses(addrs sortableNodeAddress) {
 }

 func TestUpdateNewNodeStatus(t *testing.T) {
-    // generate one more than maxImagesInNodeStatus in inputImageList
-    inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
-    testKubelet := newTestKubeletWithImageList(
-        t, inputImageList, false /* controllerAttachDetachEnabled */)
-    defer testKubelet.Cleanup()
-    kubelet := testKubelet.kubelet
-    kubelet.kubeClient = nil // ensure only the heartbeat client is used
-    kubelet.containerManager = &localCM{
-        ContainerManager: cm.NewStubContainerManager(),
-        allocatableReservation: v1.ResourceList{
-            v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-            v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-            v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
+    cases := []struct {
+        desc                string
+        nodeStatusMaxImages int32
+    }{
+        {
+            desc:                "5 image limit",
+            nodeStatusMaxImages: 5,
         },
-        capacity: v1.ResourceList{
-            v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-            v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-            v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
-        },
-    }
-    kubeClient := testKubelet.fakeKubeClient
-    existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
-    kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
-    machineInfo := &cadvisorapi.MachineInfo{
-        MachineID: "123",
-        SystemUUID: "abc",
-        BootID: "1b3",
-        NumCores: 2,
-        MemoryCapacity: 10E9, // 10G
-    }
-    mockCadvisor := testKubelet.fakeCadvisor
-    mockCadvisor.On("Start").Return(nil)
-    mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
-    versionInfo := &cadvisorapi.VersionInfo{
-        KernelVersion: "3.16.0-0.bpo.4-amd64",
-        ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
-    }
-    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-        Usage: 400,
-        Capacity: 5000,
-        Available: 600,
-    }, nil)
-    mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
-        Usage: 400,
-        Capacity: 5000,
-        Available: 600,
-    }, nil)
-    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
-    maxAge := 0 * time.Second
-    options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
-    mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
-    kubelet.machineInfo = machineInfo
-
-    expectedNode := &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
-        Spec: v1.NodeSpec{},
-        Status: v1.NodeStatus{
-            Conditions: []v1.NodeCondition{
-                {
-                    Type: v1.NodeOutOfDisk,
-                    Status: v1.ConditionFalse,
-                    Reason: "KubeletHasSufficientDisk",
-                    Message: fmt.Sprintf("kubelet has sufficient disk space available"),
-                    LastHeartbeatTime: metav1.Time{},
-                    LastTransitionTime: metav1.Time{},
-                },
-                {
-                    Type: v1.NodeMemoryPressure,
-                    Status: v1.ConditionFalse,
-                    Reason: "KubeletHasSufficientMemory",
-                    Message: fmt.Sprintf("kubelet has sufficient memory available"),
-                    LastHeartbeatTime: metav1.Time{},
-                    LastTransitionTime: metav1.Time{},
-                },
-                {
-                    Type: v1.NodeDiskPressure,
-                    Status: v1.ConditionFalse,
-                    Reason: "KubeletHasNoDiskPressure",
-                    Message: fmt.Sprintf("kubelet has no disk pressure"),
-                    LastHeartbeatTime: metav1.Time{},
-                    LastTransitionTime: metav1.Time{},
-                },
-                {
-                    Type: v1.NodePIDPressure,
-                    Status: v1.ConditionFalse,
-                    Reason: "KubeletHasSufficientPID",
-                    Message: fmt.Sprintf("kubelet has sufficient PID available"),
-                    LastHeartbeatTime: metav1.Time{},
-                    LastTransitionTime: metav1.Time{},
-                },
-                {
-                    Type: v1.NodeReady,
-                    Status: v1.ConditionTrue,
-                    Reason: "KubeletReady",
-                    Message: fmt.Sprintf("kubelet is posting ready status"),
-                    LastHeartbeatTime: metav1.Time{},
-                    LastTransitionTime: metav1.Time{},
-                },
-            },
-            NodeInfo: v1.NodeSystemInfo{
-                MachineID: "123",
-                SystemUUID: "abc",
-                BootID: "1b3",
-                KernelVersion: "3.16.0-0.bpo.4-amd64",
-                OSImage: "Debian GNU/Linux 7 (wheezy)",
-                OperatingSystem: goruntime.GOOS,
-                Architecture: goruntime.GOARCH,
-                ContainerRuntimeVersion: "test://1.5.0",
-                KubeletVersion: version.Get().String(),
-                KubeProxyVersion: version.Get().String(),
-            },
-            Capacity: v1.ResourceList{
-                v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-                v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-                v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-                v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
-            },
-            Allocatable: v1.ResourceList{
-                v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-                v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-                v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-                v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
-            },
-            Addresses: []v1.NodeAddress{
-                {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
-                {Type: v1.NodeHostName, Address: testKubeletHostname},
-            },
-            Images: expectedImageList,
+        {
+            desc:                "no image limit",
+            nodeStatusMaxImages: -1,
         },
     }
-    kubelet.updateRuntimeUp()
-    assert.NoError(t, kubelet.updateNodeStatus())
-    actions := kubeClient.Actions()
-    require.Len(t, actions, 2)
-    require.True(t, actions[1].Matches("patch", "nodes"))
-    require.Equal(t, actions[1].GetSubresource(), "status")
+    for _, tc := range cases {
+        t.Run(tc.desc, func(t *testing.T) {
+            // generate one more in inputImageList than we configure the Kubelet to report,
+            // or 5 images if unlimited
+            numTestImages := int(tc.nodeStatusMaxImages) + 1
+            if tc.nodeStatusMaxImages == -1 {
+                numTestImages = 5
+            }
+            inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
+            testKubelet := newTestKubeletWithImageList(
+                t, inputImageList, false /* controllerAttachDetachEnabled */)
+            defer testKubelet.Cleanup()
+            kubelet := testKubelet.kubelet
+            kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
+            kubelet.kubeClient = nil // ensure only the heartbeat client is used
+            kubelet.containerManager = &localCM{
+                ContainerManager: cm.NewStubContainerManager(),
+                allocatableReservation: v1.ResourceList{
+                    v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
+                    v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+                    v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
+                },
+                capacity: v1.ResourceList{
+                    v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                    v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+                    v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
+                },
+            }
+            kubeClient := testKubelet.fakeKubeClient
+            existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
+            kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
+            machineInfo := &cadvisorapi.MachineInfo{
+                MachineID: "123",
+                SystemUUID: "abc",
+                BootID: "1b3",
+                NumCores: 2,
+                MemoryCapacity: 10E9, // 10G
+            }
+            mockCadvisor := testKubelet.fakeCadvisor
+            mockCadvisor.On("Start").Return(nil)
+            mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
+            versionInfo := &cadvisorapi.VersionInfo{
+                KernelVersion: "3.16.0-0.bpo.4-amd64",
+                ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
+            }
+            mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+                Usage: 400,
+                Capacity: 5000,
+                Available: 600,
+            }, nil)
+            mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+                Usage: 400,
+                Capacity: 5000,
+                Available: 600,
+            }, nil)
+            mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
+            maxAge := 0 * time.Second
+            options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
+            mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+            kubelet.machineInfo = machineInfo

-    updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-    assert.NoError(t, err)
-    for i, cond := range updatedNode.Status.Conditions {
-        assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
-        assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
-        updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
-        updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
+            expectedNode := &v1.Node{
+                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
+                Spec: v1.NodeSpec{},
+                Status: v1.NodeStatus{
+                    Conditions: []v1.NodeCondition{
+                        {
+                            Type: v1.NodeOutOfDisk,
+                            Status: v1.ConditionFalse,
+                            Reason: "KubeletHasSufficientDisk",
+                            Message: fmt.Sprintf("kubelet has sufficient disk space available"),
+                            LastHeartbeatTime: metav1.Time{},
+                            LastTransitionTime: metav1.Time{},
+                        },
+                        {
+                            Type: v1.NodeMemoryPressure,
+                            Status: v1.ConditionFalse,
+                            Reason: "KubeletHasSufficientMemory",
+                            Message: fmt.Sprintf("kubelet has sufficient memory available"),
+                            LastHeartbeatTime: metav1.Time{},
+                            LastTransitionTime: metav1.Time{},
+                        },
+                        {
+                            Type: v1.NodeDiskPressure,
+                            Status: v1.ConditionFalse,
+                            Reason: "KubeletHasNoDiskPressure",
+                            Message: fmt.Sprintf("kubelet has no disk pressure"),
+                            LastHeartbeatTime: metav1.Time{},
+                            LastTransitionTime: metav1.Time{},
+                        },
+                        {
+                            Type: v1.NodePIDPressure,
+                            Status: v1.ConditionFalse,
+                            Reason: "KubeletHasSufficientPID",
+                            Message: fmt.Sprintf("kubelet has sufficient PID available"),
+                            LastHeartbeatTime: metav1.Time{},
+                            LastTransitionTime: metav1.Time{},
+                        },
+                        {
+                            Type: v1.NodeReady,
+                            Status: v1.ConditionTrue,
+                            Reason: "KubeletReady",
+                            Message: fmt.Sprintf("kubelet is posting ready status"),
+                            LastHeartbeatTime: metav1.Time{},
+                            LastTransitionTime: metav1.Time{},
+                        },
+                    },
+                    NodeInfo: v1.NodeSystemInfo{
+                        MachineID: "123",
+                        SystemUUID: "abc",
+                        BootID: "1b3",
+                        KernelVersion: "3.16.0-0.bpo.4-amd64",
+                        OSImage: "Debian GNU/Linux 7 (wheezy)",
+                        OperatingSystem: goruntime.GOOS,
+                        Architecture: goruntime.GOARCH,
+                        ContainerRuntimeVersion: "test://1.5.0",
+                        KubeletVersion: version.Get().String(),
+                        KubeProxyVersion: version.Get().String(),
+                    },
+                    Capacity: v1.ResourceList{
+                        v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+                        v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
+                        v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
+                    },
+                    Allocatable: v1.ResourceList{
+                        v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
+                        v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
+                        v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
+                        v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
+                    },
+                    Addresses: []v1.NodeAddress{
+                        {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+                        {Type: v1.NodeHostName, Address: testKubeletHostname},
+                    },
+                    Images: expectedImageList,
+                },
+            }
+
+            kubelet.updateRuntimeUp()
+            assert.NoError(t, kubelet.updateNodeStatus())
+            actions := kubeClient.Actions()
+            require.Len(t, actions, 2)
+            require.True(t, actions[1].Matches("patch", "nodes"))
+            require.Equal(t, actions[1].GetSubresource(), "status")
+
+            updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
+            assert.NoError(t, err)
+            for i, cond := range updatedNode.Status.Conditions {
+                assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
+                assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
+                updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
+                updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
+            }
+
+            // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+            assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
+                "NotReady should be last")
+            assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
+            assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
+        })
     }
-
-    // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
-    assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
-        "NotReady should be last")
-    assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
-    assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
 }

 func TestUpdateExistingNodeStatus(t *testing.T) {
     testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
     defer testKubelet.Cleanup()
     kubelet := testKubelet.kubelet
-    kubelet.kubeClient = nil // ensure only the heartbeat client is used
+    kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
+    kubelet.kubeClient = nil        // ensure only the heartbeat client is used
     kubelet.containerManager = &localCM{
         ContainerManager: cm.NewStubContainerManager(),
         allocatableReservation: v1.ResourceList{
@@ -742,7 +775,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
     testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
     defer testKubelet.Cleanup()
     kubelet := testKubelet.kubelet
-    kubelet.kubeClient = nil // ensure only the heartbeat client is used
+    kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
+    kubelet.kubeClient = nil        // ensure only the heartbeat client is used
     kubelet.containerManager = &localCM{
         ContainerManager: cm.NewStubContainerManager(),
         allocatableReservation: v1.ResourceList{
@@ -1213,12 +1247,15 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 }

 func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
-    // generate one more than maxImagesInNodeStatus in inputImageList
-    inputImageList, _ := generateTestingImageList(maxImagesInNodeStatus + 1)
+    const nodeStatusMaxImages = 5
+
+    // generate one more in inputImageList than we configure the Kubelet to report
+    inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
     testKubelet := newTestKubeletWithImageList(
         t, inputImageList, false /* controllerAttachDetachEnabled */)
     defer testKubelet.Cleanup()
     kubelet := testKubelet.kubelet
+    kubelet.nodeStatusMaxImages = nodeStatusMaxImages
     kubelet.kubeClient = nil // ensure only the heartbeat client is used
     kubelet.containerManager = &localCM{
         ContainerManager: cm.NewStubContainerManager(),
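Note: the following standalone Go sketch is not part of the patch above; it only illustrates the capping rule that --node-status-max-images feeds into setNodeStatusImages (a value of -1 means "no cap", any other value truncates the reported list). The capImages helper and the plain string slice are hypothetical stand-ins for the kubelet's internal image types, and the sketch skips the size-based sort that the kubelet performs first.

package main

import "fmt"

// capImages mirrors the patched setNodeStatusImages check:
// max == -1 reports everything; otherwise at most max entries are kept.
func capImages(images []string, max int32) []string {
	if int(max) > -1 && int(max) < len(images) {
		return images[0:max]
	}
	return images
}

func main() {
	images := []string{"nginx:1.13", "redis:4.0", "busybox:1.28", "alpine:3.7"}
	fmt.Println(capImages(images, 2))  // capped: first two entries
	fmt.Println(capImages(images, -1)) // uncapped: all four entries
}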