Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 11:21:47 +00:00
Merge pull request #26451 from Random-Liu/cache_image_history
Automatic merge from submit-queue.

Kubelet: Cache image history to eliminate the performance regression

Fixes https://github.com/kubernetes/kubernetes/issues/25057. The image history operation takes almost 50% of CPU usage in the kubelet performance test. We should cache image history instead of getting it from the runtime every time. This PR caches image history in imageStatsProvider and adds a unit test.

@yujuhong @vishh /cc @kubernetes/sig-node

Marked v1.3 because this is a relatively significant performance regression.
This commit is contained in: commit 77de942e08
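For context, here is a minimal, hypothetical Go sketch of the caching idea the PR describes: remember each image's layer history from the previous stats pass and only query the runtime for images not seen before, rebuilding the map on every pass so deleted images fall out of the cache. The names used here (statsCache, historyFn, totalStorageBytes) are illustrative assumptions, not the PR's actual identifiers; see the imageStatsProvider changes in the diff below for the real implementation.

// Simplified sketch of per-image history caching (illustrative names only).
package main

import "fmt"

type layer struct {
	ID   string
	Size int64
}

type statsCache struct {
	// imageToLayers remembers the layers of every image seen in the last pass.
	imageToLayers map[string][]layer
	// historyFn stands in for the expensive runtime call (docker image history).
	historyFn func(imageID string) []layer
}

// totalStorageBytes rebuilds the cache from the current image list, reusing
// cached layer data for known images and calling historyFn only on misses.
func (c *statsCache) totalStorageBytes(imageIDs []string) int64 {
	newCache := make(map[string][]layer, len(imageIDs))
	seen := make(map[string]bool) // de-duplicate layers shared between images
	var total int64
	for _, id := range imageIDs {
		layers, ok := c.imageToLayers[id]
		if !ok {
			layers = c.historyFn(id) // cache miss: hit the runtime once
		}
		newCache[id] = layers
		for _, l := range layers {
			if !seen[l.ID] {
				seen[l.ID] = true
				total += l.Size
			}
		}
	}
	// Replacing the whole map also drops entries for images that were removed.
	c.imageToLayers = newCache
	return total
}

func main() {
	calls := 0
	c := &statsCache{
		imageToLayers: map[string][]layer{},
		historyFn: func(imageID string) []layer {
			calls++
			return []layer{{ID: imageID + "-base", Size: 100}}
		},
	}
	fmt.Println(c.totalStorageBytes([]string{"busybox", "kubelet"}), calls) // first pass: 2 history calls
	fmt.Println(c.totalStorageBytes([]string{"busybox", "kubelet"}), calls) // second pass: still 2, cache hit
}

Rebuilding the map each pass trades a little extra allocation for simple invalidation, which matches the rationale in the diff that ImageStats is only called every 10 seconds.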
@@ -30,7 +30,6 @@ import (
 	dockercontainer "github.com/docker/engine-api/types/container"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 // FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup.
@@ -49,7 +48,6 @@ type FakeDockerClient struct {
 	Created       []string
 	Stopped       []string
 	Removed       []string
-	RemovedImages sets.String
 	VersionInfo   dockertypes.Version
 	Information   dockertypes.Info
 	ExecInspect   *dockertypes.ContainerExecInspect
@@ -68,10 +66,9 @@ func NewFakeDockerClient() *FakeDockerClient {
 
 func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient {
 	return &FakeDockerClient{
 		VersionInfo:   dockertypes.Version{Version: version, APIVersion: apiVersion},
 		Errors:        make(map[string]error),
-		RemovedImages: sets.String{},
 		ContainerMap:  make(map[string]*dockertypes.ContainerJSON),
 	}
 }
 
@@ -471,6 +468,7 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns
 }
 
 func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
+	f.called = append(f.called, "list_images")
 	err := f.popError("list_images")
 	return f.Images, err
 }
@@ -478,7 +476,12 @@ func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dock
 func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
 	err := f.popError("remove_image")
 	if err == nil {
-		f.RemovedImages.Insert(image)
+		for i := range f.Images {
+			if f.Images[i].ID == image {
+				f.Images = append(f.Images[:i], f.Images[i+1:]...)
+				break
+			}
+		}
 	}
 	return []dockertypes.ImageDelete{{Deleted: image}}, err
 }
@@ -538,6 +541,7 @@ func (f *FakeDockerPuller) IsImagePresent(name string) (bool, error) {
 func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
 	f.Lock()
 	defer f.Unlock()
+	f.called = append(f.called, "image_history")
 	history := f.ImageHistoryMap[id]
 	return history, nil
 }
@@ -18,54 +18,85 @@ package dockertools
 
 import (
 	"fmt"
+	"sync"
 
 	"github.com/golang/glog"
 
 	dockertypes "github.com/docker/engine-api/types"
 	runtime "k8s.io/kubernetes/pkg/kubelet/container"
-	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 // imageStatsProvider exposes stats about all images currently available.
 type imageStatsProvider struct {
+	sync.Mutex
+	// layers caches the current layers, key is the layer ID.
+	layers map[string]*dockertypes.ImageHistory
+	// imageToLayerIDs maps image to its layer IDs.
+	imageToLayerIDs map[string][]string
 	// Docker remote API client
 	c DockerInterface
 }
 
+func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
+	return &imageStatsProvider{
+		layers:          make(map[string]*dockertypes.ImageHistory),
+		imageToLayerIDs: make(map[string][]string),
+		c:               c,
+	}
+}
+
 func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
 	images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("failed to list docker images - %v", err)
 	}
-	// A map of all the image layers to its corresponding size.
-	imageMap := sets.NewString()
-	ret := &runtime.ImageStats{}
+	// Take the lock to protect the cache
+	isp.Lock()
+	defer isp.Unlock()
+	// Create new cache each time, this is a little more memory consuming, but:
+	// * ImageStats is only called every 10 seconds
+	// * We use pointers and reference to copy cache elements.
+	// The memory usage should be acceptable.
+	// TODO(random-liu): Add more logic to implement in place cache update.
+	newLayers := make(map[string]*dockertypes.ImageHistory)
+	newImageToLayerIDs := make(map[string][]string)
 	for _, image := range images {
-		// Get information about the various layers of each docker image.
-		history, err := isp.c.ImageHistory(image.ID)
-		if err != nil {
-			glog.V(2).Infof("failed to get history of docker image %v - %v", image, err)
-			continue
-		}
-		// Store size information of each layer.
-		for _, layer := range history {
-			// Skip empty layers.
-			if layer.Size == 0 {
-				glog.V(10).Infof("skipping image layer %v with size 0", layer)
+		layerIDs, ok := isp.imageToLayerIDs[image.ID]
+		if !ok {
+			// Get information about the various layers of the given docker image.
+			history, err := isp.c.ImageHistory(image.ID)
+			if err != nil {
+				// Skip the image and inspect again in next ImageStats if the image is still there
+				glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
 				continue
 			}
-			key := layer.ID
-			// Some of the layers are empty.
-			// We are hoping that these layers are unique to each image.
-			// Still keying with the CreatedBy field to be safe.
-			if key == "" || key == "<missing>" {
-				key = key + layer.CreatedBy
+			// Cache each layer
+			for i := range history {
+				layer := &history[i]
+				key := layer.ID
+				// Some of the layers are empty.
+				// We are hoping that these layers are unique to each image.
+				// Still keying with the CreatedBy field to be safe.
+				if key == "" || key == "<missing>" {
+					key = key + layer.CreatedBy
+				}
+				layerIDs = append(layerIDs, key)
+				newLayers[key] = layer
 			}
-			if !imageMap.Has(key) {
-				ret.TotalStorageBytes += uint64(layer.Size)
+		} else {
+			for _, layerID := range layerIDs {
+				newLayers[layerID] = isp.layers[layerID]
 			}
-			imageMap.Insert(key)
 		}
+		newImageToLayerIDs[image.ID] = layerIDs
 	}
+	ret := &runtime.ImageStats{}
+	// Calculate the total storage bytes
+	for _, layer := range newLayers {
+		ret.TotalStorageBytes += uint64(layer.Size)
+	}
+	// Update current cache
+	isp.layers = newLayers
+	isp.imageToLayerIDs = newImageToLayerIDs
 	return ret, nil
 }
@@ -25,10 +25,11 @@ import (
 
 func TestImageStatsNoImages(t *testing.T) {
 	fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
-	isp := &imageStatsProvider{fakeDockerClient}
+	isp := newImageStatsProvider(fakeDockerClient)
 	st, err := isp.ImageStats()
 	as := assert.New(t)
 	as.NoError(err)
+	as.NoError(fakeDockerClient.AssertCalls([]string{"list_images"}))
 	as.Equal(st.TotalStorageBytes, uint64(0))
 }
 
@@ -94,10 +95,240 @@ func TestImageStatsWithImages(t *testing.T) {
 			ID: "busybox-new",
 		},
 	})
-	isp := &imageStatsProvider{fakeDockerClient}
+	isp := newImageStatsProvider(fakeDockerClient)
 	st, err := isp.ImageStats()
 	as := assert.New(t)
 	as.NoError(err)
+	as.NoError(fakeDockerClient.AssertCalls([]string{"list_images", "image_history", "image_history", "image_history"}))
 	const expectedOutput uint64 = 1300
 	as.Equal(expectedOutput, st.TotalStorageBytes, "expected %d, got %d", expectedOutput, st.TotalStorageBytes)
 }
+
+func TestImageStatsWithCachedImages(t *testing.T) {
+	for _, test := range []struct {
+		oldLayers                map[string]*dockertypes.ImageHistory
+		oldImageToLayerIDs       map[string][]string
+		images                   []dockertypes.Image
+		history                  map[string][]dockertypes.ImageHistory
+		expectedCalls            []string
+		expectedLayers           map[string]*dockertypes.ImageHistory
+		expectedImageToLayerIDs  map[string][]string
+		expectedTotalStorageSize uint64
+	}{
+		{
+			// No cache
+			oldLayers:          make(map[string]*dockertypes.ImageHistory),
+			oldImageToLayerIDs: make(map[string][]string),
+			images: []dockertypes.Image{
+				{
+					ID: "busybox",
+				},
+				{
+					ID: "kubelet",
+				},
+			},
+			history: map[string][]dockertypes.ImageHistory{
+				"busybox": {
+					{
+						ID:        "0123456",
+						CreatedBy: "foo",
+						Size:      100,
+					},
+					{
+						ID:        "<missing>",
+						CreatedBy: "baz",
+						Size:      300,
+					},
+				},
+				"kubelet": {
+					{
+						ID:        "1123456",
+						CreatedBy: "foo",
+						Size:      200,
+					},
+					{
+						ID:        "<missing>",
+						CreatedBy: "1baz",
+						Size:      400,
+					},
+				},
+			},
+			expectedCalls: []string{"list_images", "image_history", "image_history"},
+			expectedLayers: map[string]*dockertypes.ImageHistory{
+				"0123456": {
+					ID:        "0123456",
+					CreatedBy: "foo",
+					Size:      100,
+				},
+				"1123456": {
+					ID:        "1123456",
+					CreatedBy: "foo",
+					Size:      200,
+				},
+				"<missing>baz": {
+					ID:        "<missing>",
+					CreatedBy: "baz",
+					Size:      300,
+				},
+				"<missing>1baz": {
+					ID:        "<missing>",
+					CreatedBy: "1baz",
+					Size:      400,
+				},
+			},
+			expectedImageToLayerIDs: map[string][]string{
+				"busybox": {"0123456", "<missing>baz"},
+				"kubelet": {"1123456", "<missing>1baz"},
+			},
+			expectedTotalStorageSize: 1000,
+		},
+		{
+			// Use cache value
+			oldLayers: map[string]*dockertypes.ImageHistory{
+				"0123456": {
+					ID:        "0123456",
+					CreatedBy: "foo",
+					Size:      100,
+				},
+				"<missing>baz": {
+					ID:        "<missing>",
+					CreatedBy: "baz",
+					Size:      300,
+				},
+			},
+			oldImageToLayerIDs: map[string][]string{
+				"busybox": {"0123456", "<missing>baz"},
+			},
+			images: []dockertypes.Image{
+				{
+					ID: "busybox",
+				},
+				{
+					ID: "kubelet",
+				},
+			},
+			history: map[string][]dockertypes.ImageHistory{
+				"busybox": {
+					{
+						ID:        "0123456",
+						CreatedBy: "foo",
+						Size:      100,
+					},
+					{
+						ID:        "<missing>",
+						CreatedBy: "baz",
+						Size:      300,
+					},
+				},
+				"kubelet": {
+					{
+						ID:        "1123456",
+						CreatedBy: "foo",
+						Size:      200,
+					},
+					{
+						ID:        "<missing>",
+						CreatedBy: "1baz",
+						Size:      400,
+					},
+				},
+			},
+			expectedCalls: []string{"list_images", "image_history"},
+			expectedLayers: map[string]*dockertypes.ImageHistory{
+				"0123456": {
+					ID:        "0123456",
+					CreatedBy: "foo",
+					Size:      100,
+				},
+				"1123456": {
+					ID:        "1123456",
+					CreatedBy: "foo",
+					Size:      200,
+				},
+				"<missing>baz": {
+					ID:        "<missing>",
+					CreatedBy: "baz",
+					Size:      300,
+				},
+				"<missing>1baz": {
+					ID:        "<missing>",
+					CreatedBy: "1baz",
+					Size:      400,
+				},
+			},
+			expectedImageToLayerIDs: map[string][]string{
+				"busybox": {"0123456", "<missing>baz"},
+				"kubelet": {"1123456", "<missing>1baz"},
+			},
+			expectedTotalStorageSize: 1000,
+		},
+		{
+			// Unused cache value
+			oldLayers: map[string]*dockertypes.ImageHistory{
+				"0123456": {
+					ID:        "0123456",
+					CreatedBy: "foo",
+					Size:      100,
+				},
+				"<missing>baz": {
+					ID:        "<missing>",
+					CreatedBy: "baz",
+					Size:      300,
+				},
+			},
+			oldImageToLayerIDs: map[string][]string{
+				"busybox": {"0123456", "<missing>baz"},
+			},
+			images: []dockertypes.Image{
+				{
+					ID: "kubelet",
+				},
+			},
+			history: map[string][]dockertypes.ImageHistory{
+				"kubelet": {
+					{
+						ID:        "1123456",
+						CreatedBy: "foo",
+						Size:      200,
+					},
+					{
+						ID:        "<missing>",
+						CreatedBy: "1baz",
+						Size:      400,
+					},
+				},
+			},
+			expectedCalls: []string{"list_images", "image_history"},
+			expectedLayers: map[string]*dockertypes.ImageHistory{
+				"1123456": {
+					ID:        "1123456",
+					CreatedBy: "foo",
+					Size:      200,
+				},
+				"<missing>1baz": {
+					ID:        "<missing>",
+					CreatedBy: "1baz",
+					Size:      400,
+				},
+			},
+			expectedImageToLayerIDs: map[string][]string{
+				"kubelet": {"1123456", "<missing>1baz"},
+			},
+			expectedTotalStorageSize: 600,
+		},
+	} {
+		fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
+		fakeDockerClient.InjectImages(test.images)
+		fakeDockerClient.InjectImageHistory(test.history)
+		isp := newImageStatsProvider(fakeDockerClient)
+		isp.layers = test.oldLayers
+		isp.imageToLayerIDs = test.oldImageToLayerIDs
+		st, err := isp.ImageStats()
+		as := assert.New(t)
+		as.NoError(err)
+		as.NoError(fakeDockerClient.AssertCalls(test.expectedCalls))
+		as.Equal(test.expectedLayers, isp.layers, "expected %+v, got %+v", test.expectedLayers, isp.layers)
+		as.Equal(test.expectedImageToLayerIDs, isp.imageToLayerIDs, "expected %+v, got %+v", test.expectedImageToLayerIDs, isp.imageToLayerIDs)
+		as.Equal(test.expectedTotalStorageSize, st.TotalStorageBytes, "expected %d, got %d", test.expectedTotalStorageSize, st.TotalStorageBytes)
+	}
+}
@@ -254,7 +254,7 @@ func NewDockerManager(
 		cpuCFSQuota:          cpuCFSQuota,
 		enableCustomMetrics:  enableCustomMetrics,
 		configureHairpinMode: hairpinMode,
-		imageStatsProvider:   &imageStatsProvider{client},
+		imageStatsProvider:   newImageStatsProvider(client),
 		seccompProfileRoot:   seccompProfileRoot,
 	}
 	dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)