Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00

Merge pull request #128333 from AnishShah/eviction-manager

Revert #126562 that is causing eviction tests to fail

This commit is contained in: d3fd5940e4
pkg/kubelet/container/container_gc.go

@@ -45,6 +45,8 @@ type GC interface {
 	GarbageCollect(ctx context.Context) error
 	// Deletes all unused containers, including containers belonging to pods that are terminated but not deleted
 	DeleteAllUnusedContainers(ctx context.Context) error
+	// IsContainerFsSeparateFromImageFs tells if writeable layer and read-only layer are separate.
+	IsContainerFsSeparateFromImageFs(ctx context.Context) bool
 }
 
 // SourcesReadyProvider knows how to determine if configuration sources are ready
@@ -86,3 +88,22 @@ func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error
 	klog.InfoS("Attempting to delete unused containers")
 	return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
 }
+
+func (cgc *realContainerGC) IsContainerFsSeparateFromImageFs(ctx context.Context) bool {
+	resp, err := cgc.runtime.ImageFsInfo(ctx)
+	if err != nil {
+		return false
+	}
+	// These fields can be empty if CRI implementation didn't populate.
+	if resp.ContainerFilesystems == nil || resp.ImageFilesystems == nil || len(resp.ContainerFilesystems) == 0 || len(resp.ImageFilesystems) == 0 {
+		return false
+	}
+	// KEP 4191 explains that multiple filesystems for images and containers is not
+	// supported at the moment.
+	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/4191-split-image-filesystem#comment-on-future-extensions
+	// for work needed to support multiple filesystems.
+	if resp.ContainerFilesystems[0].FsId != nil && resp.ImageFilesystems[0].FsId != nil {
+		return resp.ContainerFilesystems[0].FsId.Mountpoint != resp.ImageFilesystems[0].FsId.Mountpoint
+	}
+	return false
+}
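For readers following the revert: the restored helper boils down to comparing the first mount point the CRI reports for container filesystems against the first it reports for image filesystems. A minimal, self-contained sketch of that comparison against a hand-built CRI response (the mount paths are invented for illustration; the CRI types are the same ones used in the diff):

    package main

    import (
        "fmt"

        runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    // splitFs mirrors the restored mount-point comparison from
    // realContainerGC.IsContainerFsSeparateFromImageFs.
    func splitFs(resp *runtimeapi.ImageFsInfoResponse) bool {
        if len(resp.ContainerFilesystems) == 0 || len(resp.ImageFilesystems) == 0 {
            return false
        }
        cfs, ifs := resp.ContainerFilesystems[0].FsId, resp.ImageFilesystems[0].FsId
        if cfs == nil || ifs == nil {
            return false
        }
        return cfs.Mountpoint != ifs.Mountpoint
    }

    func main() {
        // Invented mount paths, purely for illustration.
        resp := &runtimeapi.ImageFsInfoResponse{
            ImageFilesystems:     []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "/var/lib/images"}}},
            ContainerFilesystems: []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "/var/lib/containers"}}},
        }
        fmt.Println(splitFs(resp)) // true: writeable layer lives on a different mount than the image layer
    }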
pkg/kubelet/container/container_gc_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container_test
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	. "k8s.io/kubernetes/pkg/kubelet/container"
+	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+)
+
+func TestIsContainerFsSeparateFromImageFs(t *testing.T) {
+	runtime := &ctest.FakeRuntime{}
+	fakeSources := ctest.NewFakeReadyProvider()
+
+	gcContainer, err := NewContainerGC(runtime, GCPolicy{}, fakeSources)
+	if err != nil {
+		t.Errorf("unexpected error")
+	}
+
+	cases := []struct {
+		name                          string
+		containerFs                   []*runtimeapi.FilesystemUsage
+		imageFs                       []*runtimeapi.FilesystemUsage
+		writeableSeparateFromReadOnly bool
+	}{
+		{
+			name:                          "Only images",
+			imageFs:                       []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}},
+			writeableSeparateFromReadOnly: false,
+		},
+		{
+			name:                          "images and containers",
+			imageFs:                       []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}},
+			containerFs:                   []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "container"}}},
+			writeableSeparateFromReadOnly: true,
+		},
+		{
+			name:                          "same filesystem",
+			imageFs:                       []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}},
+			containerFs:                   []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}},
+			writeableSeparateFromReadOnly: false,
+		},
+
+		{
+			name:                          "Only containers",
+			containerFs:                   []*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}},
+			writeableSeparateFromReadOnly: false,
+		},
+		{
+			name:                          "neither are specified",
+			writeableSeparateFromReadOnly: false,
+		},
+		{
+			name:                          "both are empty arrays",
+			writeableSeparateFromReadOnly: false,
+			containerFs:                   []*runtimeapi.FilesystemUsage{},
+			imageFs:                       []*runtimeapi.FilesystemUsage{},
+		},
+		{
+			name:                          "FsId does not exist",
+			writeableSeparateFromReadOnly: false,
+			containerFs:                   []*runtimeapi.FilesystemUsage{{UsedBytes: &runtimeapi.UInt64Value{Value: 10}}},
+			imageFs:                       []*runtimeapi.FilesystemUsage{{UsedBytes: &runtimeapi.UInt64Value{Value: 10}}},
+		},
+	}
+
+	for _, tc := range cases {
+		runtime.SetContainerFsStats(tc.containerFs)
+		runtime.SetImageFsStats(tc.imageFs)
+		actualCommand := gcContainer.IsContainerFsSeparateFromImageFs(context.TODO())
+
+		if e, a := tc.writeableSeparateFromReadOnly, actualCommand; !reflect.DeepEqual(e, a) {
+			t.Errorf("%v: unexpected value; expected %v, got %v", tc.name, e, a)
+		}
+		runtime.SetContainerFsStats(nil)
+		runtime.SetImageFsStats(nil)
+	}
+}
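The test drives the new method entirely through the fake runtime's stats setters. A trimmed-down, hypothetical stand-in for that seam (the real FakeRuntime lives in k8s.io/kubernetes/pkg/kubelet/container/testing) shows what SetImageFsStats and SetContainerFsStats feed into the ImageFsInfo call:

    package main

    import (
        "context"
        "fmt"

        runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    // fakeImageFsInfoSource is a hypothetical, minimal stand-in for the test's
    // FakeRuntime: it simply replays whatever stats were last set.
    type fakeImageFsInfoSource struct {
        imageFs     []*runtimeapi.FilesystemUsage
        containerFs []*runtimeapi.FilesystemUsage
    }

    func (f *fakeImageFsInfoSource) SetImageFsStats(u []*runtimeapi.FilesystemUsage)     { f.imageFs = u }
    func (f *fakeImageFsInfoSource) SetContainerFsStats(u []*runtimeapi.FilesystemUsage) { f.containerFs = u }

    // ImageFsInfo returns a CRI-shaped response built from the injected stats.
    func (f *fakeImageFsInfoSource) ImageFsInfo(_ context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
        return &runtimeapi.ImageFsInfoResponse{
            ImageFilesystems:     f.imageFs,
            ContainerFilesystems: f.containerFs,
        }, nil
    }

    func main() {
        f := &fakeImageFsInfoSource{}
        f.SetImageFsStats([]*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "image"}}})
        f.SetContainerFsStats([]*runtimeapi.FilesystemUsage{{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "container"}}})
        resp, _ := f.ImageFsInfo(context.Background())
        fmt.Println(resp.ImageFilesystems[0].FsId.Mountpoint, resp.ContainerFilesystems[0].FsId.Mountpoint)
    }

Injecting stats this way keeps the table-driven cases above purely in-memory: no CRI socket or real filesystem is needed to cover the nil, empty, and missing-FsId branches.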
pkg/kubelet/eviction/eviction_manager.go

@@ -252,17 +252,13 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
 	// build the ranking functions (if not yet known)
 	// TODO: have a function in cadvisor that lets us know if global housekeeping has completed
 	if m.dedicatedImageFs == nil {
-		hasImageFs, imageFsErr := diskInfoProvider.HasDedicatedImageFs(ctx)
-		if imageFsErr != nil {
-			klog.ErrorS(imageFsErr, "Eviction manager: failed to get HasDedicatedImageFs")
-			return nil, fmt.Errorf("eviction manager: failed to get HasDedicatedImageFs: %w", imageFsErr)
+		hasImageFs, splitDiskError := diskInfoProvider.HasDedicatedImageFs(ctx)
+		if splitDiskError != nil {
+			klog.ErrorS(splitDiskError, "Eviction manager: failed to get HasDedicatedImageFs")
+			return nil, fmt.Errorf("eviction manager: failed to get HasDedicatedImageFs: %w", splitDiskError)
 		}
 		m.dedicatedImageFs = &hasImageFs
-		splitContainerImageFs, splitErr := diskInfoProvider.HasDedicatedContainerFs(ctx)
-		if splitErr != nil {
-			klog.ErrorS(splitErr, "Eviction manager: failed to get HasDedicatedContainerFs")
-			return nil, fmt.Errorf("eviction manager: failed to get HasDedicatedContainerFs: %w", splitErr)
-		}
+		splitContainerImageFs := m.containerGC.IsContainerFsSeparateFromImageFs(ctx)
 
 		// If we are a split filesystem but the feature is turned off
 		// we should return an error.
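The net effect of this hunk: the imagefs probe keeps its error path, while the container/image split question goes back through the container GC and degrades to "not split" rather than aborting synchronize. A hedged sketch of that control flow, using throwaway local interfaces rather than the kubelet's real wiring:

    package main

    import (
        "context"
        "errors"
        "fmt"
    )

    // Illustrative stand-ins for the kubelet seams involved; the real
    // interfaces are DiskInfoProvider and ContainerGC in pkg/kubelet/eviction/types.go.
    type diskInfoProvider interface {
        HasDedicatedImageFs(ctx context.Context) (bool, error)
    }

    type containerGC interface {
        IsContainerFsSeparateFromImageFs(ctx context.Context) bool
    }

    // detectFilesystems mirrors the post-revert shape of synchronize's setup:
    // the imagefs probe can still fail the call, the split probe cannot.
    func detectFilesystems(ctx context.Context, disk diskInfoProvider, gc containerGC) (hasImageFs, splitContainerImageFs bool, err error) {
        hasImageFs, splitDiskError := disk.HasDedicatedImageFs(ctx)
        if splitDiskError != nil {
            return false, false, fmt.Errorf("eviction manager: failed to get HasDedicatedImageFs: %w", splitDiskError)
        }
        // After the revert, a CRI hiccup here just reports "not split"
        // instead of surfacing an error from synchronize.
        splitContainerImageFs = gc.IsContainerFsSeparateFromImageFs(ctx)
        return hasImageFs, splitContainerImageFs, nil
    }

    type fakeDisk struct{ err error }

    func (f fakeDisk) HasDedicatedImageFs(context.Context) (bool, error) { return true, f.err }

    type fakeGC struct{ split bool }

    func (f fakeGC) IsContainerFsSeparateFromImageFs(context.Context) bool { return f.split }

    func main() {
        if _, _, err := detectFilesystems(context.Background(), fakeDisk{err: errors.New("boom")}, fakeGC{split: true}); err != nil {
            fmt.Println(err) // the imagefs error still propagates
        }
        _, split, _ := detectFilesystems(context.Background(), fakeDisk{}, fakeGC{split: true})
        fmt.Println(split) // true
    }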
pkg/kubelet/eviction/eviction_manager_test.go

@@ -67,7 +67,6 @@ func (m *mockPodKiller) killPodNow(pod *v1.Pod, evict bool, gracePeriodOverride
 // mockDiskInfoProvider is used to simulate testing.
 type mockDiskInfoProvider struct {
 	dedicatedImageFs *bool
-	dedicatedContainerFs *bool
 }
 
 // HasDedicatedImageFs returns the mocked value
@@ -75,16 +74,12 @@ func (m *mockDiskInfoProvider) HasDedicatedImageFs(_ context.Context) (bool, err
 	return ptr.Deref(m.dedicatedImageFs, false), nil
 }
 
-// HasDedicatedContainerFs returns the mocked value
-func (m *mockDiskInfoProvider) HasDedicatedContainerFs(_ context.Context) (bool, error) {
-	return ptr.Deref(m.dedicatedContainerFs, false), nil
-}
-
 // mockDiskGC is used to simulate invoking image and container garbage collection.
 type mockDiskGC struct {
 	err error
 	imageGCInvoked bool
 	containerGCInvoked bool
+	readAndWriteSeparate bool
 	fakeSummaryProvider *fakeSummaryProvider
 	summaryAfterGC *statsapi.Summary
 }
@@ -107,6 +102,10 @@ func (m *mockDiskGC) DeleteAllUnusedContainers(_ context.Context) error {
 	return m.err
 }
 
+func (m *mockDiskGC) IsContainerFsSeparateFromImageFs(_ context.Context) bool {
+	return m.readAndWriteSeparate
+}
+
 func makePodWithMemoryStats(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, memoryWorkingSet string) (*v1.Pod, statsapi.PodStats) {
 	pod := newPod(name, priority, []v1.Container{
 		newContainer(name, requests, limits),
@@ -385,7 +384,7 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
 
 			fakeClock := testingclock.NewFakeClock(time.Now())
 			podKiller := &mockPodKiller{}
-			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false), dedicatedContainerFs: ptr.To(false)}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
 			diskGC := &mockDiskGC{err: nil}
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -563,8 +562,8 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 
 			fakeClock := testingclock.NewFakeClock(time.Now())
 			podKiller := &mockPodKiller{}
-			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs, dedicatedContainerFs: &tc.writeableSeparateFromReadOnly}
-			diskGC := &mockDiskGC{err: nil}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs}
+			diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly}
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 			config := Config{
@@ -1308,8 +1307,8 @@ func TestDiskPressureNodeFs(t *testing.T) {
 
 			fakeClock := testingclock.NewFakeClock(time.Now())
 			podKiller := &mockPodKiller{}
-			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs, dedicatedContainerFs: &tc.writeableSeparateFromReadOnly}
-			diskGC := &mockDiskGC{err: nil}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs}
+			diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly}
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 			config := Config{
@@ -1830,7 +1829,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 
 			fakeClock := testingclock.NewFakeClock(time.Now())
 			podKiller := &mockPodKiller{}
-			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs, dedicatedContainerFs: &tc.writeableSeparateFromReadOnly}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs}
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 			config := Config{
@@ -1847,7 +1846,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 			// This is a constant that we use to test that disk pressure is over. Don't change!
 			diskStatConst := diskStatStart
 			summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStatStart)}
-			diskGC := &mockDiskGC{fakeSummaryProvider: summaryProvider, err: nil}
+			diskGC := &mockDiskGC{fakeSummaryProvider: summaryProvider, err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly}
 			manager := &managerImpl{
 				clock: fakeClock,
 				killPodFunc: podKiller.killPodNow,
@@ -2293,8 +2292,8 @@ func TestInodePressureFsInodes(t *testing.T) {
 
 			fakeClock := testingclock.NewFakeClock(time.Now())
 			podKiller := &mockPodKiller{}
-			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs, dedicatedContainerFs: &tc.writeableSeparateFromReadOnly}
-			diskGC := &mockDiskGC{err: nil}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs}
+			diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly}
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 			config := Config{
pkg/kubelet/eviction/types.go

@@ -75,8 +75,6 @@ type Manager interface {
 type DiskInfoProvider interface {
 	// HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs.
 	HasDedicatedImageFs(ctx context.Context) (bool, error)
-	// HasDedicatedContainerFs returns true if the container fs is on a separate device from the rootfs.
-	HasDedicatedContainerFs(ctx context.Context) (bool, error)
 }
 
 // ImageGC is responsible for performing garbage collection of unused images.
@@ -89,6 +87,8 @@ type ImageGC interface {
 type ContainerGC interface {
 	// DeleteAllUnusedContainers deletes all unused containers, even those that belong to pods that are terminated, but not deleted.
 	DeleteAllUnusedContainers(ctx context.Context) error
+	// IsContainerFsSeparateFromImageFs checks if container filesystem is split from image filesystem.
+	IsContainerFsSeparateFromImageFs(ctx context.Context) bool
 }
 
 // KillPodFunc kills a pod.
pkg/kubelet/stats/cadvisor_stats_provider.go

@@ -276,11 +276,11 @@ func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *s
 	if imageStats == nil || len(imageStats.ImageFilesystems) == 0 || len(imageStats.ContainerFilesystems) == 0 {
 		return nil, nil, fmt.Errorf("missing image stats: %+v", imageStats)
 	}
 
 	splitFileSystem := false
 	imageFs := imageStats.ImageFilesystems[0]
 	containerFs := imageStats.ContainerFilesystems[0]
 	if imageFs.FsId != nil && containerFs.FsId != nil && imageFs.FsId.Mountpoint != containerFs.FsId.Mountpoint {
+		klog.InfoS("Detect Split Filesystem", "ImageFilesystems", imageFs, "ContainerFilesystems", containerFs)
 		splitFileSystem = true
 	}
 	var imageFsInodesUsed *uint64
@@ -312,12 +312,6 @@ func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *s
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to get container fs info: %w", err)
 	}
-	// ImageFs and ContainerFs could be on different paths on the same device.
-	if containerFsInfo.Device == imageFsInfo.Device {
-		return fsStats, fsStats, nil
-	}
-
-	klog.InfoS("Detect Split Filesystem", "ImageFilesystems", imageStats.ImageFilesystems[0], "ContainerFilesystems", imageStats.ContainerFilesystems[0])
 
 	var containerFsInodesUsed *uint64
 	if containerFsInfo.Inodes != nil && containerFsInfo.InodesFree != nil {
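Both ImageFsStats hunks sit next to the same inode bookkeeping the stats tests in this commit assert on: used inodes are derived as total minus free whenever both counters are reported, and left nil otherwise. A small self-contained helper in that spirit (hypothetical, not the kubelet's actual code):

    package main

    import "fmt"

    // inodesUsed derives used inodes the same way the stats tests check it:
    // *Inodes - *InodesFree, or nil when either counter is missing.
    func inodesUsed(inodes, inodesFree *uint64) *uint64 {
        if inodes == nil || inodesFree == nil {
            return nil
        }
        used := *inodes - *inodesFree
        return &used
    }

    func main() {
        total, free := uint64(1000), uint64(400)
        if used := inodesUsed(&total, &free); used != nil {
            fmt.Println(*used) // 600
        }
        fmt.Println(inodesUsed(nil, &free)) // <nil>: counter not reported by the filesystem
    }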
pkg/kubelet/stats/cadvisor_stats_provider_test.go

@@ -680,7 +680,7 @@ func TestCadvisorSplitImagesFsStats(t *testing.T) {
 		mockRuntime = containertest.NewMockRuntime(t)
 
 		seed = 1000
-		imageFsInfo = getTestFsInfoWithDifferentMount(seed, "image")
+		imageFsInfo = getTestFsInfo(seed)
 		containerSeed = 1001
 		containerFsInfo = getTestFsInfo(containerSeed)
 	)
@@ -723,59 +723,7 @@
 	assert.Equal(containerFsInfo.InodesFree, containerfs.InodesFree)
 	assert.Equal(containerFsInfo.Inodes, containerfs.Inodes)
 	assert.Equal(*containerFsInfo.Inodes-*containerFsInfo.InodesFree, *containerfs.InodesUsed)
-}
-
-func TestCadvisorSameDiskDifferentLocations(t *testing.T) {
-	ctx := context.Background()
-	var (
-		assert = assert.New(t)
-		mockCadvisor = cadvisortest.NewMockInterface(t)
-		mockRuntime = containertest.NewMockRuntime(t)
-
-		seed = 1000
-		imageFsInfo = getTestFsInfo(seed)
-		containerSeed = 1001
-		containerFsInfo = getTestFsInfo(containerSeed)
-	)
-	imageFsInfoCRI := &runtimeapi.FilesystemUsage{
-		Timestamp: imageFsInfo.Timestamp.Unix(),
-		FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "images"},
-		UsedBytes: &runtimeapi.UInt64Value{Value: imageFsInfo.Usage},
-		InodesUsed: &runtimeapi.UInt64Value{Value: *imageFsInfo.Inodes},
-	}
-	containerFsInfoCRI := &runtimeapi.FilesystemUsage{
-		Timestamp: containerFsInfo.Timestamp.Unix(),
-		FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "containers"},
-		UsedBytes: &runtimeapi.UInt64Value{Value: containerFsInfo.Usage},
-		InodesUsed: &runtimeapi.UInt64Value{Value: *containerFsInfo.Inodes},
-	}
-	imageFsInfoResponse := &runtimeapi.ImageFsInfoResponse{
-		ImageFilesystems: []*runtimeapi.FilesystemUsage{imageFsInfoCRI},
-		ContainerFilesystems: []*runtimeapi.FilesystemUsage{containerFsInfoCRI},
-	}
-
-	mockCadvisor.EXPECT().ImagesFsInfo().Return(imageFsInfo, nil)
-	mockCadvisor.EXPECT().ContainerFsInfo().Return(containerFsInfo, nil)
-	mockRuntime.EXPECT().ImageFsInfo(ctx).Return(imageFsInfoResponse, nil)
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, true)
-
-	provider := newCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, mockRuntime, nil, NewFakeHostStatsProvider())
-	stats, containerfs, err := provider.ImageFsStats(ctx)
-	require.NoError(t, err, "imageFsStats should have no error")
-
-	assert.Equal(imageFsInfo.Timestamp, stats.Time.Time)
-	assert.Equal(imageFsInfo.Available, *stats.AvailableBytes)
-	assert.Equal(imageFsInfo.Capacity, *stats.CapacityBytes)
-	assert.Equal(imageFsInfo.InodesFree, stats.InodesFree)
-	assert.Equal(imageFsInfo.Inodes, stats.Inodes)
-	assert.Equal(*imageFsInfo.Inodes-*imageFsInfo.InodesFree, *stats.InodesUsed)
-
-	assert.Equal(imageFsInfo.Timestamp, containerfs.Time.Time)
-	assert.Equal(imageFsInfo.Available, *containerfs.AvailableBytes)
-	assert.Equal(imageFsInfo.Capacity, *containerfs.CapacityBytes)
-	assert.Equal(imageFsInfo.InodesFree, containerfs.InodesFree)
-	assert.Equal(imageFsInfo.Inodes, containerfs.Inodes)
-	assert.Equal(*imageFsInfo.Inodes-*imageFsInfo.InodesFree, *containerfs.InodesUsed)
 }
 
 func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) {
pkg/kubelet/stats/provider.go

@@ -197,20 +197,6 @@ func (p *Provider) HasDedicatedImageFs(ctx context.Context) (bool, error) {
 	return device != rootFsInfo.Device, nil
 }
 
-// HasDedicatedImageFs returns true if a dedicated image filesystem exists for storing images.
-// KEP Issue Number 4191: Enhanced this to allow for the containers to be separate from images.
-func (p *Provider) HasDedicatedContainerFs(ctx context.Context) (bool, error) {
-	imageFs, err := p.cadvisor.ImagesFsInfo()
-	if err != nil {
-		return false, err
-	}
-	containerFs, err := p.cadvisor.ContainerFsInfo()
-	if err != nil {
-		return false, err
-	}
-	return imageFs.Device != containerFs.Device, nil
-}
-
 func equalFileSystems(a, b *statsapi.FsStats) bool {
 	if a == nil || b == nil {
 		return false
pkg/kubelet/stats/stats_provider_test.go

@@ -347,23 +347,6 @@ func getTestFsInfo(seed int) cadvisorapiv2.FsInfo {
 	}
 }
 
-func getTestFsInfoWithDifferentMount(seed int, device string) cadvisorapiv2.FsInfo {
-	var (
-		inodes = uint64(seed + offsetFsInodes)
-		inodesFree = uint64(seed + offsetFsInodesFree)
-	)
-	return cadvisorapiv2.FsInfo{
-		Timestamp: time.Now(),
-		Device: device,
-		Mountpoint: "test-mount-point",
-		Capacity: uint64(seed + offsetFsCapacity),
-		Available: uint64(seed + offsetFsAvailable),
-		Usage: uint64(seed + offsetFsUsage),
-		Inodes: &inodes,
-		InodesFree: &inodesFree,
-	}
-}
-
 func getPodVolumeStats(seed int, volumeName string) statsapi.VolumeStats {
 	availableBytes := uint64(seed + offsetFsAvailable)
 	capacityBytes := uint64(seed + offsetFsCapacity)