use memory metrics from the pod cgroup for eviction ranking

David Ashpole 2018-05-24 10:59:53 -07:00
parent 70df783b3c
commit 2be67e7dde
2 changed files with 19 additions and 57 deletions
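
Before this change, the eviction manager estimated a pod's memory usage by summing the working sets of its containers (the podMemoryUsage helper removed below). After it, the ranking code reads the working set of the pod-level cgroup straight from the summary API, which also captures memory charged to the pod's cgroup outside any single container. An abridged sketch of the summary API shapes involved (fields trimmed for illustration; see k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1 for the real definitions):

// PodStats holds pod-level stats from the kubelet summary API (abridged).
type PodStats struct {
	PodRef     PodReference     // identifies the pod (name, namespace, UID)
	Memory     *MemoryStats     // pod cgroup memory stats: read directly after this commit
	Containers []ContainerStats // per-container stats: summed by the code removed below
}

// MemoryStats carries cgroup memory metrics (abridged).
type MemoryStats struct {
	WorkingSetBytes *uint64 // bytes of working set memory in use
}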

pkg/kubelet/eviction/helpers.go

@@ -389,16 +389,6 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt
 	}, nil
 }
 
-// podMemoryUsage aggregates pod memory usage.
-func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) {
-	memory := resource.Quantity{Format: resource.BinarySI}
-	for _, container := range podStats.Containers {
-		// memory usage (if known)
-		memory.Add(*memoryUsage(container.Memory))
-	}
-	return v1.ResourceList{v1.ResourceMemory: memory}, nil
-}
-
 // localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local
 func localEphemeralVolumeNames(pod *v1.Pod) []string {
 	result := []string{}
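
The ranking functions below now apply the package's existing memoryUsage helper to the pod-level stats instead of calling the deleted aggregator. For reference, that helper reads roughly as follows (a sketch of the helper as it exists in this package, not part of this diff):

// memoryUsage converts a working set into a resource quantity, tolerating
// missing stats by returning a zero quantity.
func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
	if memStats == nil || memStats.WorkingSetBytes == nil {
		return &resource.Quantity{Format: resource.BinarySI}
	}
	usage := int64(*memStats.WorkingSetBytes)
	return resource.NewQuantity(usage, resource.BinarySI)
}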
@@ -543,15 +533,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {
 			return cmpBool(!p1Found, !p2Found)
 		}
 
-		p1Usage, p1Err := podMemoryUsage(p1Stats)
-		p2Usage, p2Err := podMemoryUsage(p2Stats)
-		if p1Err != nil || p2Err != nil {
-			// prioritize evicting the pod which had an error getting stats
-			return cmpBool(p1Err != nil, p2Err != nil)
-		}
-
-		p1Memory := p1Usage[v1.ResourceMemory]
-		p2Memory := p2Usage[v1.ResourceMemory]
+		p1Memory := memoryUsage(p1Stats.Memory)
+		p2Memory := memoryUsage(p2Stats.Memory)
 		p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
 		p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
 		// prioritize evicting the pod which exceeds its requests
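
The comparator then feeds the two booleans to cmpBool, so a pod running over its memory request sorts ahead of one running under it. cmpBool is an existing helper in this package; a likely sketch of its shape (true sorts before false):

// cmpBool orders booleans, placing true before false.
func cmpBool(a, b bool) int {
	if a == b {
		return 0
	}
	if !b {
		return -1 // a is true, b is false: a sorts first
	}
	return 1
}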
@@ -569,24 +552,17 @@ func memory(stats statsFunc) cmpFunc {
 			return cmpBool(!p1Found, !p2Found)
 		}
 
-		p1Usage, p1Err := podMemoryUsage(p1Stats)
-		p2Usage, p2Err := podMemoryUsage(p2Stats)
-		if p1Err != nil || p2Err != nil {
-			// prioritize evicting the pod which had an error getting stats
-			return cmpBool(p1Err != nil, p2Err != nil)
-		}
-
 		// adjust p1, p2 usage relative to the request (if any)
-		p1Memory := p1Usage[v1.ResourceMemory]
+		p1Memory := memoryUsage(p1Stats.Memory)
 		p1Request := podRequest(p1, v1.ResourceMemory)
 		p1Memory.Sub(p1Request)
 
-		p2Memory := p2Usage[v1.ResourceMemory]
+		p2Memory := memoryUsage(p2Stats.Memory)
 		p2Request := podRequest(p2, v1.ResourceMemory)
 		p2Memory.Sub(p2Request)
 
 		// prioritize evicting the pod which has the larger consumption of memory
-		return p2Memory.Cmp(p1Memory)
+		return p2Memory.Cmp(*p1Memory)
 	}
 }
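
These cmpFuncs are composed by the package's orderedBy multi-key sorter when the kubelet ranks pods under memory pressure. A sketch of that composition, based on the surrounding package rather than this diff:

// rankMemoryPressure orders pods for eviction under memory pressure:
// pods exceeding their requests first, then by priority, then by usage
// in excess of the request.
func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) {
	orderedBy(exceedMemoryRequests(stats), priority, memory(stats)).Sort(pods)
}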

pkg/kubelet/eviction/helpers_test.go

@@ -1015,23 +1015,17 @@ func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) {
 
 // newPodStats returns a pod stat where each container is using the specified working set
 // each pod must have a Name, UID, Namespace
-func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats {
-	result := statsapi.PodStats{
+func newPodStats(pod *v1.Pod, podWorkingSetBytes uint64) statsapi.PodStats {
+	return statsapi.PodStats{
 		PodRef: statsapi.PodReference{
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 			UID:       string(pod.UID),
 		},
+		Memory: &statsapi.MemoryStats{
+			WorkingSetBytes: &podWorkingSetBytes,
+		},
 	}
-	val := uint64(containerWorkingSetBytes)
-	for range pod.Spec.Containers {
-		result.Containers = append(result.Containers, statsapi.ContainerStats{
-			Memory: &statsapi.MemoryStats{
-				WorkingSetBytes: &val,
-			},
-		})
-	}
-	return result
 }
 
 func TestMakeSignalObservations(t *testing.T) {
@@ -1096,9 +1090,9 @@ func TestMakeSignalObservations(t *testing.T) {
 		podMaker("pod1", "ns2", "uuid2", 1),
 		podMaker("pod3", "ns3", "uuid3", 1),
 	}
-	containerWorkingSetBytes := int64(1024 * 1024 * 1024)
+	podWorkingSetBytes := uint64(1024 * 1024 * 1024)
 	for _, pod := range pods {
-		fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, containerWorkingSetBytes))
+		fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, podWorkingSetBytes))
 	}
 	res := quantityMustParse("5Gi")
 	// Allocatable thresholds are always 100%. Verify that Threshold == Capacity.
@@ -1171,11 +1165,8 @@ func TestMakeSignalObservations(t *testing.T) {
 		if !found {
 			t.Errorf("Pod stats were not found for pod %v", pod.UID)
 		}
-		for _, container := range podStats.Containers {
-			actual := int64(*container.Memory.WorkingSetBytes)
-			if containerWorkingSetBytes != actual {
-				t.Errorf("Container working set expected %v, actual: %v", containerWorkingSetBytes, actual)
-			}
+		if *podStats.Memory.WorkingSetBytes != podWorkingSetBytes {
+			t.Errorf("Pod working set expected %v, actual: %v", podWorkingSetBytes, *podStats.Memory.WorkingSetBytes)
 		}
 	}
 }
@@ -1851,20 +1842,15 @@ func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resou
 }
 
 func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodStats {
-	result := statsapi.PodStats{
+	workingSetBytes := uint64(workingSet.Value())
+	return statsapi.PodStats{
 		PodRef: statsapi.PodReference{
 			Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
 		},
+		Memory: &statsapi.MemoryStats{
+			WorkingSetBytes: &workingSetBytes,
+		},
 	}
-	for range pod.Spec.Containers {
-		workingSetBytes := uint64(workingSet.Value())
-		result.Containers = append(result.Containers, statsapi.ContainerStats{
-			Memory: &statsapi.MemoryStats{
-				WorkingSetBytes: &workingSetBytes,
-			},
-		})
-	}
-	return result
 }
 
 func newResourceList(cpu, memory, disk string) v1.ResourceList {
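
For context, a minimal sketch of how these test helpers can drive the memory comparator, echoing the package's existing ordering tests (the pod fixtures and statsFunc closure here are assumed for illustration):

// build pod-level memory stats for two hypothetical pods
statsMap := map[*v1.Pod]statsapi.PodStats{
	pod1: newPodMemoryStats(pod1, resource.MustParse("800Mi")),
	pod2: newPodMemoryStats(pod2, resource.MustParse("300Mi")),
}
// statsFn satisfies statsFunc by looking pods up in the map
statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) {
	result, found := statsMap[pod]
	return result, found
}
pods := []*v1.Pod{pod2, pod1}
orderedBy(memory(statsFn)).Sort(pods)
// pod1 (800Mi working set) should now sort first: it consumes more memory.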