kubelet: imagegc: exempt sandbox image

Seth Jennings 2018-01-12 12:00:59 -06:00
parent 2261f90b23
commit 19a546758c
5 changed files with 33 additions and 11 deletions
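Summary: the kubelet's configured pod sandbox ("pause") image is threaded into the image GC manager, and its resolved image ref is unconditionally marked as in use on every detection pass, so image GC never reclaims it; if the ref cannot be resolved, the exemption is simply skipped for that pass. A minimal, self-contained sketch of the pattern follows — the resolver interface and names here are simplified stand-ins, not the kubelet's real types:

package main

import "fmt"

// imageResolver is a simplified stand-in for the container runtime's image
// lookup (the real code calls runtime.GetImageRef with a container.ImageSpec).
type imageResolver interface {
	GetImageRef(name string) (string, error)
}

// fakeResolver maps image names to image IDs.
type fakeResolver map[string]string

func (f fakeResolver) GetImageRef(name string) (string, error) {
	return f[name], nil
}

// detectImagesInUse mirrors the shape of the change: before collecting images
// referenced by running containers, unconditionally mark the sandbox image as
// in use so a later GC pass will not delete it.
func detectImagesInUse(r imageResolver, sandboxImage string, containerImages []string) map[string]bool {
	inUse := map[string]bool{}

	// Always consider the pod sandbox image in use, even if no container references it.
	if ref, err := r.GetImageRef(sandboxImage); err == nil && ref != "" {
		inUse[ref] = true
	}

	for _, img := range containerImages {
		if ref, err := r.GetImageRef(img); err == nil && ref != "" {
			inUse[ref] = true
		}
	}
	return inUse
}

func main() {
	r := fakeResolver{
		"gcr.io/google_containers/pause-amd64:latest": "sha256:pause-id",
		"nginx:1.13": "sha256:nginx-id",
	}
	inUse := detectImagesInUse(r, "gcr.io/google_containers/pause-amd64:latest", nil)
	fmt.Println(inUse["sha256:pause-id"]) // true: exempt from GC even with no pods running
}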

View File

@@ -100,6 +100,9 @@ type realImageGCManager struct {
 	// imageCache is the cache of latest image list.
 	imageCache imageCache
+	// sandbox image exempted from GC
+	sandboxImage string
 }
 // imageCache caches latest result of ListImages.
@@ -136,7 +139,7 @@ type imageRecord struct {
 	size int64
 }
-func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
+func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) {
 	// Validate policy.
 	if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
 		return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
@@ -155,6 +158,7 @@ func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, r
 		recorder: recorder,
 		nodeRef: nodeRef,
 		initialized: false,
+		sandboxImage: sandboxImage,
 	}
 	return im, nil
@@ -196,6 +200,12 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
 func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) {
 	imagesInUse := sets.NewString()
+	// Always consider the container runtime pod sandbox image in use
+	imageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage})
+	if err == nil && imageRef != "" {
+		imagesInUse.Insert(imageRef)
+	}
 	images, err := im.runtime.ListImages()
 	if err != nil {
 		return imagesInUse, err

View File

@@ -33,6 +33,7 @@ import (
 )
 var zero time.Time
+var sandboxImage = "gcr.io/google_containers/pause-amd64:latest"
 func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManager, *containertest.FakeRuntime, *statstest.StatsProvider) {
 	fakeRuntime := &containertest.FakeRuntime{}
@@ -43,6 +44,7 @@ func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManage
 		imageRecords: make(map[string]*imageRecord),
 		statsProvider: mockStatsProvider,
 		recorder: &record.FakeRecorder{},
+		sandboxImage: sandboxImage,
 	}, fakeRuntime, mockStatsProvider
 }
@@ -176,6 +178,21 @@ func TestDetectImagesWithNewImage(t *testing.T) {
 	assert.Equal(zero, noContainer.lastUsed)
 }
+func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) {
+	manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
+	fakeRuntime.ImageList = []container.Image{
+		{
+			ID: sandboxImage,
+			Size: 1024,
+		},
+	}
+	spaceFreed, err := manager.DeleteUnusedImages()
+	assert := assert.New(t)
+	require.NoError(t, err)
+	assert.EqualValues(0, spaceFreed)
+}
 func TestDetectImagesContainerStopped(t *testing.T) {
 	manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
 	fakeRuntime.ImageList = []container.Image{
@@ -524,7 +541,7 @@ func TestValidateImageGCPolicy(t *testing.T) {
 	}
 	for _, tc := range testCases {
-		if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy); err != nil {
+		if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy, ""); err != nil {
 			if err.Error() != tc.expectErr {
 				t.Errorf("[%s:]Expected err:%v, but got:%v", tc.name, tc.expectErr, err.Error())
 			}
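The new TestDeleteUnusedImagesExemptSandboxImage above relies on a zero-valued ImageGCPolicy, under which DeleteUnusedImages tries to reclaim every image that is not in use; asserting that zero bytes were freed therefore shows the sandbox image was exempted rather than merely skipped by a threshold. A hypothetical companion case (not part of this commit, and assuming the fake runtime resolves and removes images the same way as in the test above) would check that a second, unreferenced image is still reclaimed:

func TestDeleteUnusedImagesKeepsOnlySandboxImage(t *testing.T) {
	manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
	fakeRuntime.ImageList = []container.Image{
		{ID: sandboxImage, Size: 1024},
		{ID: "busybox", Size: 2048}, // hypothetical non-sandbox image
	}
	spaceFreed, err := manager.DeleteUnusedImages()
	require.NoError(t, err)
	// Only the non-sandbox image should have been deleted.
	assert.EqualValues(t, 2048, spaceFreed)
}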

View File

@@ -743,7 +743,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
 	// setup imageManager
-	imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy)
+	imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, crOptions.PodSandboxImage)
 	if err != nil {
 		return nil, fmt.Errorf("failed to initialize image manager: %v", err)
 	}

View File

@@ -256,7 +256,7 @@ func newTestKubeletWithImageList(
 		HighThresholdPercent: 90,
 		LowThresholdPercent: 80,
 	}
-	imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
+	imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "")
 	assert.NoError(t, err)
 	kubelet.imageManager = &fakeImageGCManager{
 		fakeImageService: fakeRuntime,

View File

@@ -79,7 +79,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
 	return "", utilerrors.NewAggregate(pullErrs)
 }
-// GetImageRef gets the reference (digest or ID) of the image which has already been in
+// GetImageRef gets the ID of the image which has already been in
 // the local storage. It returns ("", nil) if the image isn't in the local storage.
 func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image})
@ -90,12 +90,7 @@ func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (
if status == nil {
return "", nil
}
imageRef := status.Id
if len(status.RepoDigests) > 0 {
imageRef = status.RepoDigests[0]
}
return imageRef, nil
return status.Id, nil
}
// ListImages gets all images currently on the machine.