Add Pod-level local ephemeral storage metric in Summary API

This PR adds a pod-level ephemeral storage metric to the Summary API.
Pod-level ephemeral storage usage is the sum of the usage of all containers
and of the pod's local ephemeral volumes, including emptyDir (if not backed
by memory or hugepages), configMap, and downwardAPI.
Jing Xu 2017-11-09 16:48:33 -08:00
parent db4134d03f
commit 75ef18c4d3
9 changed files with 234 additions and 34 deletions
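
As a rough, standalone illustration of the aggregation described above (not part of this commit; all byte values are made up), the pod-level number is a sum of per-container writable-layer and log usage plus the usage of each local ephemeral volume:

package main

import "fmt"

func main() {
	// Hypothetical per-container and per-volume usage, in bytes.
	containerRootfsUsed := []uint64{12 << 20, 4 << 20} // writable layers of two containers
	containerLogsUsed := []uint64{1 << 20, 2 << 20}    // their log directories
	ephemeralVolumeUsed := []uint64{8 << 20}           // one default-medium emptyDir

	var total uint64
	for _, u := range containerRootfsUsed {
		total += u
	}
	for _, u := range containerLogsUsed {
		total += u
	}
	for _, u := range ephemeralVolumeUsed {
		total += u
	}
	fmt.Printf("pod ephemeral-storage UsedBytes: %d\n", total) // 27 MiB
}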

View File

@ -95,6 +95,9 @@ type PodStats struct {
// +patchMergeKey=name
// +patchStrategy=merge
VolumeStats []VolumeStats `json:"volume,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
// EphemeralStorage reports the total filesystem usage for the containers and emptyDir-backed volumes in the measured Pod.
// +optional
EphemeralStorage *FsStats `json:"ephemeral-storage,omitempty"`
}
// ContainerStats holds container-level unprocessed sample stats.
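
A minimal sketch, not part of this commit, of how a client might consume the new field once it is served: fetch the kubelet summary and read PodStats.EphemeralStorage. The read-only port and address are assumptions for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

func main() {
	// Assumes the kubelet read-only endpoint is reachable on localhost:10255.
	resp, err := http.Get("http://127.0.0.1:10255/stats/summary")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var summary statsapi.Summary
	if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
		panic(err)
	}
	for _, pod := range summary.Pods {
		// EphemeralStorage is optional, so guard against nil before reading it.
		if pod.EphemeralStorage != nil && pod.EphemeralStorage.UsedBytes != nil {
			fmt.Printf("%s/%s ephemeral-storage used: %d bytes\n",
				pod.PodRef.Namespace, pod.PodRef.Name, *pod.EphemeralStorage.UsedBytes)
		}
	}
}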

View File

@ -42,9 +42,11 @@ type volumeStatCalculator struct {
latest atomic.Value
}
// PodVolumeStats encapsulates all VolumeStats for a pod
// PodVolumeStats encapsulates the VolumeStats for a pod.
// It consists of two lists, for local ephemeral volumes, and for persistent volumes respectively.
type PodVolumeStats struct {
Volumes []stats.VolumeStats
EphemeralVolumes []stats.VolumeStats
PersistentVolumes []stats.VolumeStats
}
// newVolumeStatCalculator creates a new VolumeStatCalculator
@ -101,7 +103,8 @@ func (s *volumeStatCalculator) calcAndStoreStats() {
}
// Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats
fsStats := make([]stats.VolumeStats, 0, len(volumes))
ephemeralStats := []stats.VolumeStats{}
persistentStats := []stats.VolumeStats{}
for name, v := range volumes {
metric, err := v.GetMetrics()
if err != nil {
@ -113,31 +116,38 @@ func (s *volumeStatCalculator) calcAndStoreStats() {
}
// Lookup the volume spec and add a 'PVCReference' for volumes that reference a PVC
volSpec := volumesSpec[name]
var pvcRef *stats.PVCReference
if pvcSource := volSpec.PersistentVolumeClaim; pvcSource != nil {
pvcRef := stats.PVCReference{
pvcRef = &stats.PVCReference{
Name: pvcSource.ClaimName,
Namespace: s.pod.GetNamespace(),
}
fsStats = append(fsStats, s.parsePodVolumeStats(name, &pvcRef, metric))
// Set the PVC's prometheus metrics
s.setPVCMetrics(&pvcRef, metric)
} else {
fsStats = append(fsStats, s.parsePodVolumeStats(name, nil, metric))
s.setPVCMetrics(pvcRef, metric)
}
volumeStats := s.parsePodVolumeStats(name, pvcRef, metric, volSpec)
if isVolumeEphemeral(volSpec) {
ephemeralStats = append(ephemeralStats, volumeStats)
} else {
persistentStats = append(persistentStats, volumeStats)
}
}
// Store the new stats
s.latest.Store(PodVolumeStats{Volumes: fsStats})
s.latest.Store(PodVolumeStats{EphemeralVolumes: ephemeralStats,
PersistentVolumes: persistentStats})
}
// parsePodVolumeStats converts (internal) volume.Metrics to (external) stats.VolumeStats structures
func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats.PVCReference, metric *volume.Metrics) stats.VolumeStats {
func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats.PVCReference, metric *volume.Metrics, volSpec v1.Volume) stats.VolumeStats {
available := uint64(metric.Available.Value())
capacity := uint64(metric.Capacity.Value())
used := uint64(metric.Used.Value())
inodes := uint64(metric.Inodes.Value())
inodesFree := uint64(metric.InodesFree.Value())
inodesUsed := uint64(metric.InodesUsed.Value())
return stats.VolumeStats{
Name: podName,
PVCRef: pvcRef,
@ -146,6 +156,14 @@ func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats
}
}
func isVolumeEphemeral(volume v1.Volume) bool {
if (volume.EmptyDir != nil && volume.EmptyDir.Medium == v1.StorageMediumDefault) ||
volume.ConfigMap != nil || volume.GitRepo != nil {
return true
}
return false
}
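
An illustrative, self-contained check (not part of this commit) of how the helper above classifies common volume sources; it duplicates isVolumeEphemeral so the snippet compiles on its own. A memory-backed emptyDir is excluded because it consumes RAM rather than local disk:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// Copied from the diff above so the example is self-contained.
func isVolumeEphemeral(volume v1.Volume) bool {
	if (volume.EmptyDir != nil && volume.EmptyDir.Medium == v1.StorageMediumDefault) ||
		volume.ConfigMap != nil || volume.GitRepo != nil {
		return true
	}
	return false
}

func main() {
	diskEmptyDir := v1.Volume{Name: "scratch",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
	memEmptyDir := v1.Volume{Name: "tmpfs",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}}
	pvcVolume := v1.Volume{Name: "data",
		VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "my-claim"}}}

	fmt.Println(isVolumeEphemeral(diskEmptyDir)) // true: default medium uses node-local disk
	fmt.Println(isVolumeEphemeral(memEmptyDir))  // false: backed by memory
	fmt.Println(isVolumeEphemeral(pvcVolume))    // false: counted as a persistent volume
}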
// setPVCMetrics sets the given PVC's prometheus metrics to match the given volume.Metrics
func (s *volumeStatCalculator) setPVCMetrics(pvcRef *stats.PVCReference, metric *volume.Metrics) {
metrics.VolumeStatsAvailableBytes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Available.Value()))

View File

@ -17,10 +17,11 @@ limitations under the License.
package stats
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/stretchr/testify/assert"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -84,14 +85,14 @@ func TestPVCRef(t *testing.T) {
statsCalculator.calcAndStoreStats()
vs, _ := statsCalculator.GetLatest()
assert.Len(t, vs.Volumes, 2)
assert.Len(t, append(vs.EphemeralVolumes, vs.PersistentVolumes...), 2)
// Verify 'vol0' doesn't have a PVC reference
assert.Contains(t, vs.Volumes, kubestats.VolumeStats{
assert.Contains(t, append(vs.EphemeralVolumes, vs.PersistentVolumes...), kubestats.VolumeStats{
Name: vol0,
FsStats: expectedFSStats(),
})
// Verify 'vol1' has a PVC reference
assert.Contains(t, vs.Volumes, kubestats.VolumeStats{
assert.Contains(t, append(vs.EphemeralVolumes, vs.PersistentVolumes...), kubestats.VolumeStats{
Name: vol1,
PVCRef: &kubestats.PVCReference{
Name: pvcClaimName,

View File

@ -134,15 +134,66 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
for _, podStats := range podToStats {
// Lookup the volume stats for each pod.
podUID := types.UID(podStats.PodRef.UID)
var ephemeralStats []statsapi.VolumeStats
if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found {
podStats.VolumeStats = vstats.Volumes
ephemeralStats = make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
copy(ephemeralStats, vstats.EphemeralVolumes)
podStats.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...)
}
podStats.EphemeralStorage = calcEphemeralStorage(podStats.Containers, ephemeralStats, &rootFsInfo)
result = append(result, *podStats)
}
return result, nil
}
func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats {
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
for _, container := range containers {
addContainerUsage(result, &container)
}
for _, volume := range volumes {
result.UsedBytes = addUsage(result.UsedBytes, volume.FsStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, volume.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &volume.FsStats.Time)
}
return result
}
func addContainerUsage(stat *statsapi.FsStats, container *statsapi.ContainerStats) {
if rootFs := container.Rootfs; rootFs != nil {
stat.Time = maxUpdateTime(&stat.Time, &rootFs.Time)
stat.InodesUsed = addUsage(stat.InodesUsed, rootFs.InodesUsed)
stat.UsedBytes = addUsage(stat.UsedBytes, rootFs.UsedBytes)
if logs := container.Logs; logs != nil {
stat.UsedBytes = addUsage(stat.UsedBytes, logs.UsedBytes)
stat.Time = maxUpdateTime(&stat.Time, &logs.Time)
}
}
}
func maxUpdateTime(first, second *metav1.Time) metav1.Time {
if first.Before(second) {
return *second
}
return *first
}
func addUsage(first, second *uint64) *uint64 {
if first == nil {
return second
} else if second == nil {
return first
}
total := *first + *second
return &total
}
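
A standalone illustration (not this commit's code) of the nil-safe arithmetic that calcEphemeralStorage relies on through addUsage above: a missing sample (nil pointer) leaves the other operand untouched instead of forcing the total to zero or dereferencing nil.

package main

import "fmt"

// Same shape as the addUsage helper in the diff above.
func addUsage(first, second *uint64) *uint64 {
	if first == nil {
		return second
	} else if second == nil {
		return first
	}
	total := *first + *second
	return &total
}

func main() {
	rootfsUsed, logsUsed := uint64(300), uint64(42)
	fmt.Println(*addUsage(&rootfsUsed, &logsUsed)) // 342
	fmt.Println(*addUsage(nil, &logsUsed))         // 42: rootfs stats not collected yet
	fmt.Println(addUsage(nil, nil))                // <nil>: no data from either source
}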
// ImageFsStats returns the stats of the filesystem for storing images.
func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
imageFsInfo, err := p.cadvisor.ImagesFsInfo()

View File

@ -27,6 +27,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/leaky"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
)
func TestRemoveTerminatedContainerInfo(t *testing.T) {
@ -79,17 +80,21 @@ func TestCadvisorListPodStats(t *testing.T) {
namespace2 = "test2"
)
const (
seedRoot = 0
seedRuntime = 100
seedKubelet = 200
seedMisc = 300
seedPod0Infra = 1000
seedPod0Container0 = 2000
seedPod0Container1 = 2001
seedPod1Infra = 3000
seedPod1Container = 4000
seedPod2Infra = 5000
seedPod2Container = 6000
seedRoot = 0
seedRuntime = 100
seedKubelet = 200
seedMisc = 300
seedPod0Infra = 1000
seedPod0Container0 = 2000
seedPod0Container1 = 2001
seedPod1Infra = 3000
seedPod1Container = 4000
seedPod2Infra = 5000
seedPod2Container = 6000
seedEphemeralVolume1 = 10000
seedEphemeralVolume2 = 10001
seedPersistentVolume1 = 20000
seedPersistentVolume2 = 20001
)
const (
pName0 = "pod0"
@ -181,7 +186,16 @@ func TestCadvisorListPodStats(t *testing.T) {
mockRuntime.
On("ImageStats").Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil)
resourceAnalyzer := &fakeResourceAnalyzer{}
ephemeralVolumes := []statsapi.VolumeStats{getPodVolumeStats(seedEphemeralVolume1, "ephemeralVolume1"),
getPodVolumeStats(seedEphemeralVolume2, "ephemeralVolume2")}
persistentVolumes := []statsapi.VolumeStats{getPodVolumeStats(seedPersistentVolume1, "persistentVolume1"),
getPodVolumeStats(seedPersistentVolume2, "persistentVolume2")}
volumeStats := serverstats.PodVolumeStats{
EphemeralVolumes: ephemeralVolumes,
PersistentVolumes: persistentVolumes,
}
resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats}
p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime)
pods, err := p.ListPodStats()
@ -213,6 +227,7 @@ func TestCadvisorListPodStats(t *testing.T) {
assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix())
checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network)
checkEphemeralStats(t, "Pod0", []int{seedPod0Container0, seedPod0Container1}, []int{seedEphemeralVolume1, seedEphemeralVolume2}, ps.EphemeralStorage)
// Validate Pod1 Results
ps, found = indexPods[prf1]

View File

@ -131,14 +131,16 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
// container belongs to.
ps, found := sandboxIDToPodStats[podSandboxID]
if !found {
ps = p.makePodStats(podSandbox)
ps = buildPodStats(podSandbox)
sandboxIDToPodStats[podSandboxID] = ps
}
ps.Containers = append(ps.Containers, *p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo))
containerStats := p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo)
ps.Containers = append(ps.Containers, *containerStats)
}
result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats))
for _, s := range sandboxIDToPodStats {
p.makePodStorageStats(s, &rootFsInfo)
result = append(result, *s)
}
return result, nil
@ -199,8 +201,9 @@ func (p *criStatsProvider) getFsInfo(storageID *runtimeapi.StorageIdentifier) *c
return &fsInfo
}
func (p *criStatsProvider) makePodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
s := &statsapi.PodStats{
// buildPodStats returns a PodStats whose PodRef identifies the given podSandbox
func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
return &statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: podSandbox.Metadata.Name,
UID: podSandbox.Metadata.Uid,
@ -210,9 +213,15 @@ func (p *criStatsProvider) makePodStats(podSandbox *runtimeapi.PodSandbox) *stat
StartTime: metav1.NewTime(time.Unix(0, podSandbox.CreatedAt)),
// Network stats are not supported by CRI.
}
}
func (p *criStatsProvider) makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.PodStats {
podUID := types.UID(s.PodRef.UID)
if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found {
s.VolumeStats = vstats.Volumes
ephemeralStats := make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
copy(ephemeralStats, vstats.EphemeralVolumes)
s.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...)
s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo)
}
return s
}
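
The copy into ephemeralStats above looks redundant at first glance; it is a defensive step so that the slice handed to calcEphemeralStorage does not share a backing array with the combined VolumeStats slice built by append. The sketch below (made-up names, not this commit's code) shows the append aliasing behaviour that such copies guard against:

package main

import "fmt"

func main() {
	// A cached slice with spare capacity, standing in for a shared list of
	// ephemeral volume stats.
	cached := make([]string, 2, 4)
	cached[0], cached[1] = "emptydir-a", "configmap-b"

	// Appending directly reuses the cached backing array when capacity allows,
	// so the new element lands in the cache's spare slot.
	combined := append(cached, "pvc-c")
	fmt.Println(combined)                // [emptydir-a configmap-b pvc-c]
	fmt.Println(cached[:cap(cached)][2]) // "pvc-c" is now visible through the cached array

	// Copying first breaks the aliasing: the next append allocates a fresh array.
	independent := make([]string, len(cached))
	copy(independent, cached)
	combined = append(independent, "pvc-d")
	fmt.Println(combined)                // [emptydir-a configmap-b pvc-d]
	fmt.Println(cached[:cap(cached)][2]) // still "pvc-c", untouched by the second append
}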

View File

@ -25,12 +25,14 @@ import (
"github.com/stretchr/testify/assert"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
critest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
)
func TestCRIListPodStats(t *testing.T) {
@ -80,6 +82,13 @@ func TestCRIListPodStats(t *testing.T) {
containerStats0, containerStats1, containerStats2, containerStats3, containerStats4,
})
ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1", "ephVolumes2"})
persistentVolumes := makeFakeVolumeStats([]string{"persisVolume1", "persisVolumes2"})
resourceAnalyzer.podVolumeStats = serverstats.PodVolumeStats{
EphemeralVolumes: ephemeralVolumes,
PersistentVolumes: persistentVolumes,
}
provider := NewCRIStatsProvider(
mockCadvisor,
resourceAnalyzer,
@ -102,6 +111,8 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1})
containerStatsMap := make(map[string]statsapi.ContainerStats)
for _, s := range p0.Containers {
containerStatsMap[s.Name] = s
@ -121,6 +132,7 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2})
c2 := p1.Containers[0]
assert.Equal("container2-name", c2.Name)
assert.Equal(container2.CreatedAt, c2.StartTime.UnixNano())
@ -132,11 +144,14 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4})
c3 := p2.Containers[0]
assert.Equal("container3-name", c3.Name)
assert.Equal(container4.CreatedAt, c3.StartTime.UnixNano())
checkCRICPUAndMemoryStats(assert, c3, containerStats4)
checkCRIRootfsStats(assert, c3, containerStats4, &imageFsInfo)
checkCRILogsStats(assert, c3, &rootFsInfo)
mockCadvisor.AssertExpectations(t)
@ -236,8 +251,8 @@ func makeFakeContainerStats(container *critest.FakeContainer, imageFsUUID string
WritableLayer: &runtimeapi.FilesystemUsage{
Timestamp: time.Now().UnixNano(),
StorageId: &runtimeapi.StorageIdentifier{Uuid: imageFsUUID},
UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64()},
InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64()},
UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100},
InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100},
},
}
if container.State == runtimeapi.ContainerState_CONTAINER_EXITED {
@ -265,6 +280,32 @@ func makeFakeImageFsUsage(fsUUID string) *runtimeapi.FilesystemUsage {
}
}
func makeFakeVolumeStats(volumeNames []string) []statsapi.VolumeStats {
volumes := make([]statsapi.VolumeStats, len(volumeNames))
availableBytes := rand.Uint64()
capacityBytes := rand.Uint64()
usedBytes := rand.Uint64() / 100
inodes := rand.Uint64()
inodesFree := rand.Uint64()
inodesUsed := rand.Uint64() / 100
for i, name := range volumeNames {
fsStats := statsapi.FsStats{
Time: metav1.NewTime(time.Now()),
AvailableBytes: &availableBytes,
CapacityBytes: &capacityBytes,
UsedBytes: &usedBytes,
Inodes: &inodes,
InodesFree: &inodesFree,
InodesUsed: &inodesUsed,
}
volumes[i] = statsapi.VolumeStats{
FsStats: fsStats,
Name: name,
}
}
return volumes
}
func checkCRICPUAndMemoryStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *runtimeapi.ContainerStats) {
assert.Equal(cs.Cpu.Timestamp, actual.CPU.Time.UnixNano())
assert.Equal(cs.Cpu.UsageCoreNanoSeconds.Value, *actual.CPU.UsageCoreNanoSeconds)
@ -305,3 +346,18 @@ func checkCRILogsStats(assert *assert.Assertions, actual statsapi.ContainerStats
assert.Nil(actual.Logs.UsedBytes)
assert.Nil(actual.Logs.InodesUsed)
}
func checkEphemeralStorageStats(assert *assert.Assertions, actual statsapi.PodStats, volumes []statsapi.VolumeStats, containers []*runtimeapi.ContainerStats) {
var totalUsed, inodesUsed uint64
for _, container := range containers {
totalUsed = totalUsed + container.WritableLayer.UsedBytes.Value
inodesUsed = inodesUsed + container.WritableLayer.InodesUsed.Value
}
for _, volume := range volumes {
totalUsed = totalUsed + *volume.FsStats.UsedBytes
inodesUsed = inodesUsed + *volume.FsStats.InodesUsed
}
assert.Equal(int(*actual.EphemeralStorage.UsedBytes), int(totalUsed))
assert.Equal(int(*actual.EphemeralStorage.InodesUsed), int(inodesUsed))
}

View File

@ -59,6 +59,7 @@ const (
offsetFsTotalUsageBytes
offsetFsBaseUsageBytes
offsetFsInodeUsage
offsetVolume
)
var (
@ -456,6 +457,28 @@ func getTestFsInfo(seed int) cadvisorapiv2.FsInfo {
}
}
func getPodVolumeStats(seed int, volumeName string) statsapi.VolumeStats {
availableBytes := uint64(seed + offsetFsAvailable)
capacityBytes := uint64(seed + offsetFsCapacity)
usedBytes := uint64(seed + offsetFsUsage)
inodes := uint64(seed + offsetFsInodes)
inodesFree := uint64(seed + offsetFsInodesFree)
inodesUsed := uint64(seed + offsetFsInodeUsage)
fsStats := statsapi.FsStats{
Time: metav1.NewTime(time.Now()),
AvailableBytes: &availableBytes,
CapacityBytes: &capacityBytes,
UsedBytes: &usedBytes,
Inodes: &inodes,
InodesFree: &inodesFree,
InodesUsed: &inodesUsed,
}
return statsapi.VolumeStats{
FsStats: fsStats,
Name: volumeName,
}
}
func generateCustomMetricSpec() []cadvisorapiv1.MetricSpec {
f := fuzz.New().NilChance(0).Funcs(
func(e *cadvisorapiv1.MetricSpec, c fuzz.Continue) {
@ -542,6 +565,20 @@ func checkFsStats(t *testing.T, label string, seed int, stats *statsapi.FsStats)
assert.EqualValues(t, seed+offsetFsInodesFree, *stats.InodesFree, label+".InodesFree")
}
func checkEphemeralStats(t *testing.T, label string, containerSeeds []int, volumeSeeds []int, stats *statsapi.FsStats) {
var usedBytes, inodeUsage int
for _, cseed := range containerSeeds {
usedBytes = usedBytes + cseed + offsetFsTotalUsageBytes
inodeUsage += cseed + offsetFsInodeUsage
}
for _, vseed := range volumeSeeds {
usedBytes = usedBytes + vseed + offsetFsUsage
inodeUsage += vseed + offsetFsInodeUsage
}
assert.EqualValues(t, usedBytes, int(*stats.UsedBytes), label+".UsedBytes")
assert.EqualValues(t, inodeUsage, int(*stats.InodesUsed), label+".InodesUsed")
}
type fakeResourceAnalyzer struct {
podVolumeStats serverstats.PodVolumeStats
}

View File

@ -190,7 +190,17 @@ var _ = framework.KubeDescribe("Summary API", func() {
}),
}),
}),
"EphemeralStorage": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(framework.Kb, 21*framework.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
}),
})
matchExpectations := ptrMatchAllFields(gstruct.Fields{
"Node": gstruct.MatchAllFields(gstruct.Fields{
"NodeName": Equal(framework.TestContext.NodeName),