Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 05:27:21 +00:00)
Merge pull request #64170 from mtaufen/cap-node-num-images
Automatic merge from submit-queue (batch tested with PRs 61803, 64305, 64170, 64361, 64339). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

add a flag to control the cap on images reported in node status

While I normally try to avoid adding flags, this is a short-term scalability fix for v1.11, and there are other long-term solutions in the works, so we shouldn't commit to this in the v1beta1 Kubelet config. Flags are our escape hatch here.

```release-note
NONE
```
Commit: e978c47f5e
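Before the diff below, a minimal, self-contained sketch of the flag's intended semantics: the default of 50 preserves the previously hardcoded cap, `-1` disables the cap, and anything below `-1` is rejected. This is not the Kubelet's actual wiring; the standalone `main` and the flag-set name are illustrative only, though `pflag` is the flag library the Kubelet's `AddFlags` uses.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Illustrative flag set; the real flag is registered in KubeletFlags.AddFlags (see the diff).
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)

	// Default of 50 preserves the cap that was previously hardcoded in the kubelet.
	nodeStatusMaxImages := fs.Int32("node-status-max-images", 50,
		"The maximum number of images to report in Node.Status.Images. If -1 is specified, no cap will be applied.")

	if err := fs.Parse([]string{"--node-status-max-images=-1"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// Mirrors the check added to ValidateKubeletFlags below: only -1 or greater is accepted.
	if *nodeStatusMaxImages < -1 {
		fmt.Println("invalid configuration: NodeStatusMaxImages (--node-status-max-images) must be -1 or greater")
		return
	}
	fmt.Println("node-status-max-images =", *nodeStatusMaxImages)
}
```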
@@ -169,6 +169,9 @@ type KubeletFlags struct {
 	// bootstrapCheckpointPath is the path to the directory containing pod checkpoints to
 	// run on restore
 	BootstrapCheckpointPath string
+	// NodeStatusMaxImages caps the number of images reported in Node.Status.Images.
+	// This is an experimental, short-term flag to help with node scalability.
+	NodeStatusMaxImages int32
 
 	// DEPRECATED FLAGS
 	// minimumGCAge is the minimum age for a finished container before it is
@@ -244,6 +247,8 @@ func NewKubeletFlags() *KubeletFlags {
 		CAdvisorPort: 0,
 		// TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated
 		AllowPrivileged: true,
+		// prior to the introduction of this flag, there was a hardcoded cap of 50 images
+		NodeStatusMaxImages: 50,
 	}
 }
 
@@ -255,6 +260,9 @@ func ValidateKubeletFlags(f *KubeletFlags) error {
 	if f.CAdvisorPort != 0 && utilvalidation.IsValidPortNum(int(f.CAdvisorPort)) != nil {
 		return fmt.Errorf("invalid configuration: CAdvisorPort (--cadvisor-port) %v must be between 0 and 65535, inclusive", f.CAdvisorPort)
 	}
+	if f.NodeStatusMaxImages < -1 {
+		return fmt.Errorf("invalid configuration: NodeStatusMaxImages (--node-status-max-images) must be -1 or greater")
+	}
 	return nil
 }
 
@@ -392,6 +400,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
 	fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
 	fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, "<Warning: Alpha feature> Directory path for seccomp profiles.")
 	fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, "<Warning: Alpha feature> Path to to the directory where the checkpoints are stored")
+	fs.Int32Var(&f.NodeStatusMaxImages, "node-status-max-images", f.NodeStatusMaxImages, "<Warning: Alpha feature> The maximum number of images to report in Node.Status.Images. If -1 is specified, no cap will be applied. Default: 50")
 
 	// DEPRECATED FLAGS
 	fs.StringVar(&f.BootstrapKubeconfig, "experimental-bootstrap-kubeconfig", f.BootstrapKubeconfig, "")
@@ -958,6 +958,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		kubeFlags.NodeLabels,
 		kubeFlags.SeccompProfileRoot,
 		kubeFlags.BootstrapCheckpointPath,
+		kubeFlags.NodeStatusMaxImages,
 		stopCh)
 	if err != nil {
 		return fmt.Errorf("failed to create kubelet: %v", err)
@@ -1043,6 +1044,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	nodeLabels map[string]string,
 	seccompProfileRoot string,
 	bootstrapCheckpointPath string,
+	nodeStatusMaxImages int32,
 	stopCh <-chan struct{}) (k kubelet.Bootstrap, err error) {
 	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
 	// up into "per source" synchronizations
@@ -1077,6 +1079,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		nodeLabels,
 		seccompProfileRoot,
 		bootstrapCheckpointPath,
+		nodeStatusMaxImages,
 		stopCh)
 	if err != nil {
 		return nil, err
@@ -219,7 +219,9 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	keepTerminatedPodVolumes bool,
 	nodeLabels map[string]string,
 	seccompProfileRoot string,
-	bootstrapCheckpointPath string) (Bootstrap, error)
+	bootstrapCheckpointPath string,
+	nodeStatusMaxImages int32,
+	stopCh <-chan struct{}) (Bootstrap, error)
 
 // Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
@@ -345,6 +347,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	nodeLabels map[string]string,
 	seccompProfileRoot string,
 	bootstrapCheckpointPath string,
+	nodeStatusMaxImages int32,
 	stopCh <-chan struct{}) (*Kubelet, error) {
 	if rootDirectory == "" {
 		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
@@ -535,6 +538,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		iptablesDropBit: int(kubeCfg.IPTablesDropBit),
 		experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate),
 		keepTerminatedPodVolumes: keepTerminatedPodVolumes,
+		nodeStatusMaxImages: nodeStatusMaxImages,
 	}
 
 	if klet.cloud != nil {
@@ -1157,6 +1161,9 @@ type Kubelet struct {
 	// such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the
 	// directory returned by kubelet.getPluginsDir()
 	pluginWatcher pluginwatcher.Watcher
+
+	// This flag sets a maximum number of images to report in the node status.
+	nodeStatusMaxImages int32
 }
 
 func allGlobalUnicastIPs() ([]net.IP, error) {
@@ -49,9 +49,6 @@ import (
 )
 
 const (
-	// maxImagesInNodeStatus is the number of max images we store in image status.
-	maxImagesInNodeStatus = 50
-
 	// maxNamesPerImageInNodeStatus is max number of names per image stored in
 	// the node status.
 	maxNamesPerImageInNodeStatus = 5
@@ -721,8 +718,9 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
 		return
 	}
 	// sort the images from max to min, and only set top N images into the node status.
-	if maxImagesInNodeStatus < len(containerImages) {
-		containerImages = containerImages[0:maxImagesInNodeStatus]
+	if int(kl.nodeStatusMaxImages) > -1 &&
+		int(kl.nodeStatusMaxImages) < len(containerImages) {
+		containerImages = containerImages[0:kl.nodeStatusMaxImages]
 	}
 
 	for _, image := range containerImages {
@@ -60,8 +60,8 @@ const (
 	maxImageTagsForTest = 20
 )
 
-// generateTestingImageList generate randomly generated image list and corresponding expectedImageList.
-func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
+// generateTestingImageLists generate randomly generated image list and corresponding expectedImageList.
+func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
 	// imageList is randomly generated image list
 	var imageList []kubecontainer.Image
 	for ; count > 0; count-- {
@@ -73,7 +73,12 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
 		imageList = append(imageList, imageItem)
 	}
 
-	// expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
+	expectedImageList := makeExpectedImageList(imageList, maxImages)
+	return imageList, expectedImageList
+}
+
+func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
+	// expectedImageList is generated by imageList according to size and maxImages
 	// 1. sort the imageList by size
 	sort.Sort(sliceutils.ByImageSize(imageList))
 	// 2. convert sorted imageList to v1.ContainerImage list
@@ -86,8 +91,11 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
 
 		expectedImageList = append(expectedImageList, apiImage)
 	}
-	// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
-	return imageList, expectedImageList[0:maxImagesInNodeStatus]
+	// 3. only returns the top maxImages images in expectedImageList
+	if maxImages == -1 { // -1 means no limit
+		return expectedImageList
+	}
+	return expectedImageList[0:maxImages]
 }
 
 func generateImageTags() []string {
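The hunk that follows converts TestUpdateNewNodeStatus into a table-driven test over nodeStatusMaxImages values. A stripped-down sketch of that pattern, with a simplified string-based cap function standing in for the Kubelet's real status path:

```go
package main

import "testing"

// capImages is a hypothetical helper: truncate unless the limit is -1.
func capImages(images []string, max int32) []string {
	if max > -1 && int(max) < len(images) {
		return images[0:max]
	}
	return images
}

func TestCapImages(t *testing.T) {
	cases := []struct {
		desc                string
		nodeStatusMaxImages int32
		wantLen             int
	}{
		{desc: "5 image limit", nodeStatusMaxImages: 5, wantLen: 5},
		{desc: "no image limit", nodeStatusMaxImages: -1, wantLen: 6},
	}
	images := []string{"a", "b", "c", "d", "e", "f"}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			if got := len(capImages(images, tc.nodeStatusMaxImages)); got != tc.wantLen {
				t.Errorf("%s: got %d images, want %d", tc.desc, got, tc.wantLen)
			}
		})
	}
}
```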
@@ -299,165 +307,190 @@ func sortNodeAddresses(addrs sortableNodeAddress) {
 }
 
 func TestUpdateNewNodeStatus(t *testing.T) {
-	// generate one more than maxImagesInNodeStatus in inputImageList
-	inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
-	testKubelet := newTestKubeletWithImageList(
-		t, inputImageList, false /* controllerAttachDetachEnabled */)
-	defer testKubelet.Cleanup()
-	kubelet := testKubelet.kubelet
-	kubelet.kubeClient = nil // ensure only the heartbeat client is used
-	kubelet.containerManager = &localCM{
-		ContainerManager: cm.NewStubContainerManager(),
-		allocatableReservation: v1.ResourceList{
-			v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-			v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
+	cases := []struct {
+		desc                string
+		nodeStatusMaxImages int32
+	}{
+		{
+			desc:                "5 image limit",
+			nodeStatusMaxImages: 5,
 		},
-		capacity: v1.ResourceList{
-			v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
-		},
-	}
-	kubeClient := testKubelet.fakeKubeClient
-	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
-	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
-	machineInfo := &cadvisorapi.MachineInfo{
-		MachineID: "123",
-		SystemUUID: "abc",
-		BootID: "1b3",
-		NumCores: 2,
-		MemoryCapacity: 10E9, // 10G
-	}
-	mockCadvisor := testKubelet.fakeCadvisor
-	mockCadvisor.On("Start").Return(nil)
-	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
-	versionInfo := &cadvisorapi.VersionInfo{
-		KernelVersion: "3.16.0-0.bpo.4-amd64",
-		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
-	}
-	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-		Usage: 400,
-		Capacity: 5000,
-		Available: 600,
-	}, nil)
-	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
-		Usage: 400,
-		Capacity: 5000,
-		Available: 600,
-	}, nil)
-	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
-	maxAge := 0 * time.Second
-	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
-	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
-	kubelet.machineInfo = machineInfo
-
-	expectedNode := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
-		Spec: v1.NodeSpec{},
-		Status: v1.NodeStatus{
-			Conditions: []v1.NodeCondition{
-				{
-					Type: v1.NodeOutOfDisk,
-					Status: v1.ConditionFalse,
-					Reason: "KubeletHasSufficientDisk",
-					Message: fmt.Sprintf("kubelet has sufficient disk space available"),
-					LastHeartbeatTime: metav1.Time{},
-					LastTransitionTime: metav1.Time{},
-				},
-				{
-					Type: v1.NodeMemoryPressure,
-					Status: v1.ConditionFalse,
-					Reason: "KubeletHasSufficientMemory",
-					Message: fmt.Sprintf("kubelet has sufficient memory available"),
-					LastHeartbeatTime: metav1.Time{},
-					LastTransitionTime: metav1.Time{},
-				},
-				{
-					Type: v1.NodeDiskPressure,
-					Status: v1.ConditionFalse,
-					Reason: "KubeletHasNoDiskPressure",
-					Message: fmt.Sprintf("kubelet has no disk pressure"),
-					LastHeartbeatTime: metav1.Time{},
-					LastTransitionTime: metav1.Time{},
-				},
-				{
-					Type: v1.NodePIDPressure,
-					Status: v1.ConditionFalse,
-					Reason: "KubeletHasSufficientPID",
-					Message: fmt.Sprintf("kubelet has sufficient PID available"),
-					LastHeartbeatTime: metav1.Time{},
-					LastTransitionTime: metav1.Time{},
-				},
-				{
-					Type: v1.NodeReady,
-					Status: v1.ConditionTrue,
-					Reason: "KubeletReady",
-					Message: fmt.Sprintf("kubelet is posting ready status"),
-					LastHeartbeatTime: metav1.Time{},
-					LastTransitionTime: metav1.Time{},
-				},
-			},
-			NodeInfo: v1.NodeSystemInfo{
-				MachineID: "123",
-				SystemUUID: "abc",
-				BootID: "1b3",
-				KernelVersion: "3.16.0-0.bpo.4-amd64",
-				OSImage: "Debian GNU/Linux 7 (wheezy)",
-				OperatingSystem: goruntime.GOOS,
-				Architecture: goruntime.GOARCH,
-				ContainerRuntimeVersion: "test://1.5.0",
-				KubeletVersion: version.Get().String(),
-				KubeProxyVersion: version.Get().String(),
-			},
-			Capacity: v1.ResourceList{
-				v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
-			},
-			Allocatable: v1.ResourceList{
-				v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-				v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
-			},
-			Addresses: []v1.NodeAddress{
-				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
-				{Type: v1.NodeHostName, Address: testKubeletHostname},
-			},
-			Images: expectedImageList,
+		{
+			desc:                "no image limit",
+			nodeStatusMaxImages: -1,
 		},
 	}
 
-	kubelet.updateRuntimeUp()
-	assert.NoError(t, kubelet.updateNodeStatus())
-	actions := kubeClient.Actions()
-	require.Len(t, actions, 2)
-	require.True(t, actions[1].Matches("patch", "nodes"))
-	require.Equal(t, actions[1].GetSubresource(), "status")
+	for _, tc := range cases {
+		t.Run(tc.desc, func(t *testing.T) {
+			// generate one more in inputImageList than we configure the Kubelet to report,
+			// or 5 images if unlimited
+			numTestImages := int(tc.nodeStatusMaxImages) + 1
+			if tc.nodeStatusMaxImages == -1 {
+				numTestImages = 5
+			}
+			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
+			testKubelet := newTestKubeletWithImageList(
+				t, inputImageList, false /* controllerAttachDetachEnabled */)
+			defer testKubelet.Cleanup()
+			kubelet := testKubelet.kubelet
+			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
+			kubelet.kubeClient = nil // ensure only the heartbeat client is used
+			kubelet.containerManager = &localCM{
+				ContainerManager: cm.NewStubContainerManager(),
+				allocatableReservation: v1.ResourceList{
+					v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+					v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
+				},
+				capacity: v1.ResourceList{
+					v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+					v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
+				},
+			}
+			kubeClient := testKubelet.fakeKubeClient
+			existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
+			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
+			machineInfo := &cadvisorapi.MachineInfo{
+				MachineID: "123",
+				SystemUUID: "abc",
+				BootID: "1b3",
+				NumCores: 2,
+				MemoryCapacity: 10E9, // 10G
+			}
+			mockCadvisor := testKubelet.fakeCadvisor
+			mockCadvisor.On("Start").Return(nil)
+			mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
+			versionInfo := &cadvisorapi.VersionInfo{
+				KernelVersion: "3.16.0-0.bpo.4-amd64",
+				ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
+			}
+			mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+				Usage: 400,
+				Capacity: 5000,
+				Available: 600,
+			}, nil)
+			mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+				Usage: 400,
+				Capacity: 5000,
+				Available: 600,
+			}, nil)
+			mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
+			maxAge := 0 * time.Second
+			options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
+			mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+			kubelet.machineInfo = machineInfo
 
-	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-	assert.NoError(t, err)
-	for i, cond := range updatedNode.Status.Conditions {
-		assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
-		assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
-		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
-		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
+			expectedNode := &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
+				Spec: v1.NodeSpec{},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type: v1.NodeOutOfDisk,
+							Status: v1.ConditionFalse,
+							Reason: "KubeletHasSufficientDisk",
+							Message: fmt.Sprintf("kubelet has sufficient disk space available"),
+							LastHeartbeatTime: metav1.Time{},
+							LastTransitionTime: metav1.Time{},
+						},
+						{
+							Type: v1.NodeMemoryPressure,
+							Status: v1.ConditionFalse,
+							Reason: "KubeletHasSufficientMemory",
+							Message: fmt.Sprintf("kubelet has sufficient memory available"),
+							LastHeartbeatTime: metav1.Time{},
+							LastTransitionTime: metav1.Time{},
+						},
+						{
+							Type: v1.NodeDiskPressure,
+							Status: v1.ConditionFalse,
+							Reason: "KubeletHasNoDiskPressure",
+							Message: fmt.Sprintf("kubelet has no disk pressure"),
+							LastHeartbeatTime: metav1.Time{},
+							LastTransitionTime: metav1.Time{},
+						},
+						{
+							Type: v1.NodePIDPressure,
+							Status: v1.ConditionFalse,
+							Reason: "KubeletHasSufficientPID",
+							Message: fmt.Sprintf("kubelet has sufficient PID available"),
+							LastHeartbeatTime: metav1.Time{},
+							LastTransitionTime: metav1.Time{},
+						},
+						{
+							Type: v1.NodeReady,
+							Status: v1.ConditionTrue,
+							Reason: "KubeletReady",
+							Message: fmt.Sprintf("kubelet is posting ready status"),
+							LastHeartbeatTime: metav1.Time{},
+							LastTransitionTime: metav1.Time{},
+						},
+					},
+					NodeInfo: v1.NodeSystemInfo{
+						MachineID: "123",
+						SystemUUID: "abc",
+						BootID: "1b3",
+						KernelVersion: "3.16.0-0.bpo.4-amd64",
+						OSImage: "Debian GNU/Linux 7 (wheezy)",
+						OperatingSystem: goruntime.GOOS,
+						Architecture: goruntime.GOARCH,
+						ContainerRuntimeVersion: "test://1.5.0",
+						KubeletVersion: version.Get().String(),
+						KubeProxyVersion: version.Get().String(),
+					},
+					Capacity: v1.ResourceList{
+						v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+						v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
+						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
+					},
+					Allocatable: v1.ResourceList{
+						v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
+						v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
+						v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
+					},
+					Addresses: []v1.NodeAddress{
+						{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
+						{Type: v1.NodeHostName, Address: testKubeletHostname},
+					},
+					Images: expectedImageList,
+				},
+			}
+
+			kubelet.updateRuntimeUp()
+			assert.NoError(t, kubelet.updateNodeStatus())
+			actions := kubeClient.Actions()
+			require.Len(t, actions, 2)
+			require.True(t, actions[1].Matches("patch", "nodes"))
+			require.Equal(t, actions[1].GetSubresource(), "status")
+
+			updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
+			assert.NoError(t, err)
+			for i, cond := range updatedNode.Status.Conditions {
+				assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
+				assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
+				updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
+				updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
+			}
+
+			// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
+			assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
+				"NotReady should be last")
+			assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
+			assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
+		})
 	}
-
-	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
-	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
-		"NotReady should be last")
-	assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
-	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
 }
 
 func TestUpdateExistingNodeStatus(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
+	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
 		allocatableReservation: v1.ResourceList{
@@ -742,7 +775,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
+	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
 		allocatableReservation: v1.ResourceList{
@@ -1213,12 +1247,15 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 }
 
 func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
-	// generate one more than maxImagesInNodeStatus in inputImageList
-	inputImageList, _ := generateTestingImageList(maxImagesInNodeStatus + 1)
+	const nodeStatusMaxImages = 5
+
+	// generate one more in inputImageList than we configure the Kubelet to report
+	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
 	testKubelet := newTestKubeletWithImageList(
 		t, inputImageList, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
+	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),