Cache image history

This commit is contained in:
Random-Liu 2016-05-27 13:42:09 -07:00
parent d2a45f0ba5
commit 56bde2df9f
2 changed files with 56 additions and 25 deletions

View File

@ -18,42 +18,61 @@ package dockertools
import (
"fmt"
"sync"
"github.com/golang/glog"
dockertypes "github.com/docker/engine-api/types"
runtime "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/sets"
)
// imageStatsProvider exposes stats about all images currently available.
type imageStatsProvider struct {
	// Mutex guards the layer caches below; ImageStats holds it while
	// rebuilding them. Embedding gives imageStatsProvider Lock/Unlock.
	sync.Mutex
	// layers caches the current layers, key is the layer ID.
	// For layers with an empty or "<missing>" ID, the key is the ID
	// concatenated with the layer's CreatedBy string (see ImageStats).
	layers map[string]*dockertypes.ImageHistory
	// imageToLayerIDs maps image to its layer IDs.
	// Used to reuse cached layer entries for images whose history has
	// already been fetched, avoiding a docker ImageHistory call per image.
	imageToLayerIDs map[string][]string
	// Docker remote API client
	c DockerInterface
}
// newImageStatsProvider returns an imageStatsProvider backed by the given
// Docker client, with empty layer and image-to-layer caches.
func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
	isp := &imageStatsProvider{c: c}
	isp.layers = make(map[string]*dockertypes.ImageHistory)
	isp.imageToLayerIDs = make(map[string][]string)
	return isp
}
func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to list docker images - %v", err)
}
// A map of all the image layers to its corresponding size.
imageMap := sets.NewString()
ret := &runtime.ImageStats{}
// Take the lock to protect the cache
isp.Lock()
defer isp.Unlock()
// Create new cache each time, this is a little more memory consuming, but:
// * ImageStats is only called every 10 seconds
// * We use pointers and reference to copy cache elements.
// The memory usage should be acceptable.
// TODO(random-liu): Add more logic to implement in place cache update.
newLayers := make(map[string]*dockertypes.ImageHistory)
newImageToLayerIDs := make(map[string][]string)
for _, image := range images {
// Get information about the various layers of each docker image.
layerIDs, ok := isp.imageToLayerIDs[image.ID]
if !ok {
// Get information about the various layers of the given docker image.
history, err := isp.c.ImageHistory(image.ID)
if err != nil {
glog.V(2).Infof("failed to get history of docker image %v - %v", image, err)
continue
}
// Store size information of each layer.
for _, layer := range history {
// Skip empty layers.
if layer.Size == 0 {
glog.V(10).Infof("skipping image layer %v with size 0", layer)
// Skip the image and inspect again in next ImageStats if the image is still there
glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
continue
}
// Cache each layer
for i := range history {
layer := &history[i]
key := layer.ID
// Some of the layers are empty.
// We are hoping that these layers are unique to each image.
@ -61,11 +80,23 @@ func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
if key == "" || key == "<missing>" {
key = key + layer.CreatedBy
}
if !imageMap.Has(key) {
layerIDs = append(layerIDs, key)
newLayers[key] = layer
}
} else {
for _, layerID := range layerIDs {
newLayers[layerID] = isp.layers[layerID]
}
}
newImageToLayerIDs[image.ID] = layerIDs
}
ret := &runtime.ImageStats{}
// Calculate the total storage bytes
for _, layer := range newLayers {
ret.TotalStorageBytes += uint64(layer.Size)
}
imageMap.Insert(key)
}
}
// Update current cache
isp.layers = newLayers
isp.imageToLayerIDs = newImageToLayerIDs
return ret, nil
}

View File

@ -254,7 +254,7 @@ func NewDockerManager(
cpuCFSQuota: cpuCFSQuota,
enableCustomMetrics: enableCustomMetrics,
configureHairpinMode: hairpinMode,
imageStatsProvider: &imageStatsProvider{client},
imageStatsProvider: newImageStatsProvider(client),
seccompProfileRoot: seccompProfileRoot,
}
dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)