Merge pull request #79247 from egernst/kubelet-PodOverhead

Kubelet: enable support for pod overhead
Kubernetes Prow Robot, 2019-08-20 13:27:15 -07:00, committed by GitHub (commit 8cf05f514c)
4 changed files with 242 additions and 108 deletions

diff --git a/pkg/api/v1/resource/BUILD b/pkg/api/v1/resource/BUILD

@@ -11,8 +11,12 @@ go_test(
     srcs = ["helpers_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )
@@ -22,8 +26,10 @@ go_library(
     srcs = ["helpers.go"],
     importpath = "k8s.io/kubernetes/pkg/api/v1/resource",
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )
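
The new BUILD deps mirror the imports these packages now need: the helpers consult feature gates at runtime, and the tests flip the PodOverhead gate on for their duration. A minimal sketch of that gate-toggling pattern, using only calls that appear in this diff (the package and test names are illustrative, not from this PR):

package example

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"
)

func TestWithPodOverheadEnabled(t *testing.T) {
    // Enable the PodOverhead gate for this test only; the deferred
    // call restores the previous gate value when the test returns.
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

    if !utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
        t.Fatal("expected PodOverhead to be enabled")
    }
}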

diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go

@@ -23,11 +23,13 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/kubernetes/pkg/features"
 )

 // addResourceList adds the resources in newList to list
-func addResourceList(list, new v1.ResourceList) {
-    for name, quantity := range new {
+func addResourceList(list, newList v1.ResourceList) {
+    for name, quantity := range newList {
         if value, ok := list[name]; !ok {
             list[name] = *quantity.Copy()
         } else {
@@ -53,7 +55,9 @@ func maxResourceList(list, new v1.ResourceList) {
 }

 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
-// containers of the pod.
+// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
+// total container resource requests and to the total container limits which have a
+// non-zero quantity.
 func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
     reqs, limits = v1.ResourceList{}, v1.ResourceList{}
     for _, container := range pod.Spec.Containers {
@@ -65,37 +69,79 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
         maxResourceList(reqs, container.Resources.Requests)
         maxResourceList(limits, container.Resources.Limits)
     }
+
+    // if PodOverhead feature is supported, add overhead for running a pod
+    // to the sum of requests and to non-zero limits:
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        addResourceList(reqs, pod.Spec.Overhead)
+
+        for name, quantity := range pod.Spec.Overhead {
+            if value, ok := limits[name]; ok && !value.IsZero() {
+                value.Add(quantity)
+                limits[name] = value
+            }
+        }
+    }
+
     return
 }

-// GetResourceRequest finds and returns the request for a specific resource.
+// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
+func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
+    requestQuantity := resource.Quantity{}
+
+    switch resourceName {
+    case v1.ResourceCPU:
+        requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+    case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
+        requestQuantity = resource.Quantity{Format: resource.BinarySI}
+    default:
+        requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+    }
+
+    if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+        // if the local storage capacity isolation feature gate is disabled, pods request 0 disk
+        return requestQuantity
+    }
+
+    for _, container := range pod.Spec.Containers {
+        if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
+            requestQuantity.Add(rQuantity)
+        }
+    }
+
+    for _, container := range pod.Spec.InitContainers {
+        if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
+            if requestQuantity.Cmp(rQuantity) < 0 {
+                requestQuantity = rQuantity.DeepCopy()
+            }
+        }
+    }
+
+    // if PodOverhead feature is supported, add overhead for running a pod
+    // to the total requests if the resource total is non-zero
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
+            requestQuantity.Add(podOverhead)
+        }
+    }
+
+    return requestQuantity
+}
+
+// GetResourceRequest finds and returns the request value for a specific resource.
 func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
     if resource == v1.ResourcePods {
         return 1
     }
-    totalResources := int64(0)
-    for _, container := range pod.Spec.Containers {
-        if rQuantity, ok := container.Resources.Requests[resource]; ok {
-            if resource == v1.ResourceCPU {
-                totalResources += rQuantity.MilliValue()
-            } else {
-                totalResources += rQuantity.Value()
-            }
-        }
-    }
+
+    requestQuantity := GetResourceRequestQuantity(pod, resource)

-    // take max_resource(sum_pod, any_init_container)
-    for _, container := range pod.Spec.InitContainers {
-        if rQuantity, ok := container.Resources.Requests[resource]; ok {
-            if resource == v1.ResourceCPU {
-                if rQuantity.MilliValue() > totalResources {
-                    totalResources = rQuantity.MilliValue()
-                }
-            } else if rQuantity.Value() > totalResources {
-                totalResources = rQuantity.Value()
-            }
-        }
-    }
-    return totalResources
+    if resource == v1.ResourceCPU {
+        return requestQuantity.MilliValue()
+    }
+
+    return requestQuantity.Value()
 }

 // ExtractResourceValueByContainerName extracts the value of a resource
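
To make the new accounting concrete, here is a small sketch of calling the updated helper (a hypothetical main package; the import path comes from the BUILD rule above, and the totals assume the PodOverhead gate is enabled):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
    pod := &v1.Pod{
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name: "app",
                Resources: v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse("1"),
                        v1.ResourceMemory: resource.MustParse("100Mi"),
                    },
                    // Only CPU is limited; memory is left unbounded.
                    Limits: v1.ResourceList{
                        v1.ResourceCPU: resource.MustParse("2"),
                    },
                },
            }},
            // Overhead models the sandbox cost of the pod's RuntimeClass
            // (for example, a Kata Containers VM).
            Overhead: v1.ResourceList{
                v1.ResourceCPU:    resource.MustParse("250m"),
                v1.ResourceMemory: resource.MustParse("120Mi"),
            },
        },
    }

    reqs, limits := resourcehelper.PodRequestsAndLimits(pod)
    // Requests become cpu=1250m, memory=220Mi: overhead is added to the sums.
    // Overhead is folded into limits only where a non-zero limit exists, so
    // limits become cpu=2250m while memory stays unlimited.
    fmt.Println(reqs, limits)
}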

diff --git a/pkg/api/v1/resource/helpers_test.go b/pkg/api/v1/resource/helpers_test.go

@@ -22,7 +22,11 @@ import (
     "github.com/stretchr/testify/assert"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/equality"
     "k8s.io/apimachinery/pkg/api/resource"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
+    "k8s.io/kubernetes/pkg/features"
 )

 func TestResourceHelpers(t *testing.T) {
@@ -64,27 +68,53 @@ func TestDefaultResourceHelpers(t *testing.T) {
 }

 func TestGetResourceRequest(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
     cases := []struct {
         pod           *v1.Pod
-        res           v1.ResourceName
+        cName         string
+        resourceName  v1.ResourceName
         expectedValue int64
         expectedError error
     }{
         {
-            pod: getPod("foo", "9", "", "", ""),
-            res: v1.ResourceCPU,
+            pod:           getPod("foo", podResources{cpuRequest: "9"}),
+            resourceName:  v1.ResourceCPU,
             expectedValue: 9000,
         },
         {
-            pod: getPod("foo", "", "", "90Mi", ""),
-            res: v1.ResourceMemory,
+            pod:           getPod("foo", podResources{memoryRequest: "90Mi"}),
+            resourceName:  v1.ResourceMemory,
             expectedValue: 94371840,
         },
+        {
+            cName:         "just-overhead for cpu",
+            pod:           getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceCPU,
+            expectedValue: 0,
+        },
+        {
+            cName:         "just-overhead for memory",
+            pod:           getPod("foo", podResources{memoryOverhead: "5"}),
+            resourceName:  v1.ResourceMemory,
+            expectedValue: 0,
+        },
+        {
+            cName:         "cpu overhead and req",
+            pod:           getPod("foo", podResources{cpuRequest: "2", cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceCPU,
+            expectedValue: 7000,
+        },
+        {
+            cName:         "mem overhead and req",
+            pod:           getPod("foo", podResources{cpuRequest: "2", memoryRequest: "1024", cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceMemory,
+            expectedValue: 1029,
+        },
     }
     as := assert.New(t)
     for idx, tc := range cases {
-        actual := GetResourceRequest(tc.pod, tc.res)
-        as.Equal(actual, tc.expectedValue, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual)
+        actual := GetResourceRequest(tc.pod, tc.resourceName)
+        as.Equal(actual, tc.expectedValue, "expected test case [%d] %v: to return %q; got %q instead", idx, tc.cName, tc.expectedValue, actual)
     }
 }
@@ -101,7 +131,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "9", "", ""),
+            pod:           getPod("foo", podResources{cpuLimit: "9"}),
             expectedValue: "9",
         },
         {
@@ -109,7 +139,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "", ""),
+            pod:           getPod("foo", podResources{}),
             expectedValue: "0",
         },
         {
@@ -117,7 +147,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "8", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "8"}),
             expectedValue: "8",
         },
         {
@@ -125,7 +155,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "100m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "100m"}),
             expectedValue: "1",
         },
         {
@@ -134,7 +164,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor: resource.MustParse("100m"),
             },
             cName:         "foo",
-            pod:           getPod("foo", "1200m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "1200m"}),
             expectedValue: "12",
         },
         {
@@ -142,7 +172,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.memory",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "100Mi", ""),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -151,7 +181,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor: resource.MustParse("1Mi"),
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "100Mi", "1Gi"),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi", memoryLimit: "1Gi"}),
             expectedValue: "100",
         },
         {
@@ -159,7 +189,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.memory",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "10Mi", "100Mi"),
+            pod:           getPod("foo", podResources{memoryRequest: "10Mi", memoryLimit: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -167,7 +197,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "9", "", ""),
+            pod:           getPod("foo", podResources{cpuLimit: "9"}),
             expectedValue: "9",
         },
         {
@@ -175,7 +205,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "", ""),
+            pod:           getPod("foo", podResources{}),
             expectedValue: "0",
         },
         {
@@ -183,7 +213,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "8", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "8"}),
             expectedValue: "8",
         },
         {
@@ -191,7 +221,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "100m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "100m"}),
             expectedValue: "1",
         },
         {
@@ -200,7 +230,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor: resource.MustParse("100m"),
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "1200m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "1200m"}),
             expectedValue: "12",
         },
         {
@@ -208,7 +238,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.memory",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "100Mi", ""),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -217,15 +247,16 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor: resource.MustParse("1Mi"),
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "100Mi", "1Gi"),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi", memoryLimit: "1Gi"}),
             expectedValue: "100",
         },
         {
             fs: &v1.ResourceFieldSelector{
                 Resource: "limits.memory",
             },
-            cName:         "init-foo",
-            pod:           getPod("foo", "", "", "10Mi", "100Mi"),
+            cName:         "init-foo",
+            pod:           getPod("foo", podResources{memoryRequest: "10Mi", memoryLimit: "100Mi"}),
             expectedValue: "104857600",
         },
     }
@@ -241,37 +272,124 @@ func TestExtractResourceValue(t *testing.T) {
     }
 }

-func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod {
-    resources := v1.ResourceRequirements{
+func TestPodRequestsAndLimits(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+    cases := []struct {
+        pod              *v1.Pod
+        cName            string
+        expectedRequests v1.ResourceList
+        expectedLimits   v1.ResourceList
+    }{
+        {
+            cName:            "just-limit-no-overhead",
+            pod:              getPod("foo", podResources{cpuLimit: "9"}),
+            expectedRequests: v1.ResourceList{},
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"),
+            },
+        },
+        {
+            cName: "just-overhead",
+            pod:   getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("5"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"),
+            },
+            expectedLimits: v1.ResourceList{},
+        },
+        {
+            cName: "req-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{},
+        },
+        {
+            cName: "all-req-lim-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", memoryLimit: "12", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("7"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"),
+            },
+        },
+        {
+            cName: "req-some-lim-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU): resource.MustParse("7"),
+            },
+        },
+    }
+    for idx, tc := range cases {
+        resRequests, resLimits := PodRequestsAndLimits(tc.pod)
+        if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) {
+            t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedRequests, resRequests)
+        }
+        if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) {
+            t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedLimits, resLimits)
+        }
+    }
+}
+
+type podResources struct {
+    cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string
+}
+
+func getPod(cname string, resources podResources) *v1.Pod {
+    r := v1.ResourceRequirements{
         Limits:   make(v1.ResourceList),
         Requests: make(v1.ResourceList),
     }
-    if cpuLimit != "" {
-        resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit)
+
+    overhead := make(v1.ResourceList)
+
+    if resources.cpuLimit != "" {
+        r.Limits[v1.ResourceCPU] = resource.MustParse(resources.cpuLimit)
     }
-    if memoryLimit != "" {
-        resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit)
+    if resources.memoryLimit != "" {
+        r.Limits[v1.ResourceMemory] = resource.MustParse(resources.memoryLimit)
     }
-    if cpuRequest != "" {
-        resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest)
+    if resources.cpuRequest != "" {
+        r.Requests[v1.ResourceCPU] = resource.MustParse(resources.cpuRequest)
     }
-    if memoryRequest != "" {
-        resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest)
+    if resources.memoryRequest != "" {
+        r.Requests[v1.ResourceMemory] = resource.MustParse(resources.memoryRequest)
     }
+    if resources.cpuOverhead != "" {
+        overhead[v1.ResourceCPU] = resource.MustParse(resources.cpuOverhead)
+    }
+    if resources.memoryOverhead != "" {
+        overhead[v1.ResourceMemory] = resource.MustParse(resources.memoryOverhead)
+    }
+
     return &v1.Pod{
         Spec: v1.PodSpec{
             Containers: []v1.Container{
                 {
                     Name:      cname,
-                    Resources: resources,
+                    Resources: r,
                 },
             },
             InitContainers: []v1.Container{
                 {
                     Name:      "init-" + cname,
-                    Resources: resources,
+                    Resources: r,
                 },
             },
+            Overhead: overhead,
         },
     }
 }
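
The expected values above encode the overhead arithmetic: overhead is only added when the containers already request a non-zero amount of that resource, which is why the "just-overhead" cases expect 0. In "cpu overhead and req", 2 CPU of requests plus 5 CPU of overhead is reported as 7000 millicores; in "mem overhead and req", 1024 + 5 = 1029 bytes. A standalone sketch of the Quantity arithmetic behind the CPU case:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    q := resource.MustParse("2")   // container CPU request, in cores
    q.Add(resource.MustParse("5")) // PodOverhead CPU, in cores
    // GetResourceRequest reports CPU in millicores.
    fmt.Println(q.MilliValue()) // prints 7000
}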

diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go

@@ -25,9 +25,8 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
-    "k8s.io/kubernetes/pkg/features"
+    v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
     statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -536,8 +535,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {
         p1Memory := memoryUsage(p1Stats.Memory)
         p2Memory := memoryUsage(p2Stats.Memory)
-        p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
-        p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
+        p1ExceedsRequests := p1Memory.Cmp(v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)) == 1
+        p2ExceedsRequests := p2Memory.Cmp(v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)) == 1
         // prioritize evicting the pod which exceeds its requests
         return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
     }
@@ -555,11 +554,11 @@ func memory(stats statsFunc) cmpFunc {
         // adjust p1, p2 usage relative to the request (if any)
         p1Memory := memoryUsage(p1Stats.Memory)
-        p1Request := podRequest(p1, v1.ResourceMemory)
+        p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)
         p1Memory.Sub(p1Request)

         p2Memory := memoryUsage(p2Stats.Memory)
-        p2Request := podRequest(p2, v1.ResourceMemory)
+        p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)
         p2Memory.Sub(p2Request)

         // prioritize evicting the pod which has the larger consumption of memory
@@ -567,41 +566,6 @@ }
     }
 }

-// podRequest returns the total resource request of a pod which is the
-// max(max of init container requests, sum of container requests)
-func podRequest(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
-    containerValue := resource.Quantity{Format: resource.BinarySI}
-    if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
-        // if the local storage capacity isolation feature gate is disabled, pods request 0 disk
-        return containerValue
-    }
-    for i := range pod.Spec.Containers {
-        switch resourceName {
-        case v1.ResourceMemory:
-            containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory())
-        case v1.ResourceEphemeralStorage:
-            containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral())
-        }
-    }
-    initValue := resource.Quantity{Format: resource.BinarySI}
-    for i := range pod.Spec.InitContainers {
-        switch resourceName {
-        case v1.ResourceMemory:
-            if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) < 0 {
-                initValue = *pod.Spec.InitContainers[i].Resources.Requests.Memory()
-            }
-        case v1.ResourceEphemeralStorage:
-            if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()) < 0 {
-                initValue = *pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()
-            }
-        }
-    }
-    if containerValue.Cmp(initValue) > 0 {
-        return containerValue
-    }
-    return initValue
-}

 // exceedDiskRequests compares whether or not pods' disk usage exceeds their requests
 func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
     return func(p1, p2 *v1.Pod) int {
@@ -621,8 +585,8 @@ func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskRes
         p1Disk := p1Usage[diskResource]
         p2Disk := p2Usage[diskResource]
-        p1ExceedsRequests := p1Disk.Cmp(podRequest(p1, diskResource)) == 1
-        p2ExceedsRequests := p2Disk.Cmp(podRequest(p2, diskResource)) == 1
+        p1ExceedsRequests := p1Disk.Cmp(v1resource.GetResourceRequestQuantity(p1, diskResource)) == 1
+        p2ExceedsRequests := p2Disk.Cmp(v1resource.GetResourceRequestQuantity(p2, diskResource)) == 1
         // prioritize evicting the pod which exceeds its requests
         return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
     }
@@ -647,9 +611,9 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.Resou
         // adjust p1, p2 usage relative to the request (if any)
         p1Disk := p1Usage[diskResource]
         p2Disk := p2Usage[diskResource]
-        p1Request := podRequest(p1, v1.ResourceEphemeralStorage)
+        p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceEphemeralStorage)
         p1Disk.Sub(p1Request)
-        p2Request := podRequest(p2, v1.ResourceEphemeralStorage)
+        p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceEphemeralStorage)
         p2Disk.Sub(p2Request)
         // prioritize evicting the pod which has the larger consumption of disk
         return p2Disk.Cmp(p1Disk)
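
Replacing the eviction manager's private podRequest with the shared v1resource.GetResourceRequestQuantity means eviction ranking now sees overhead-inclusive requests: a pod whose sandbox overhead is charged to it appears less far over its request, and so ranks as a less attractive eviction candidate. A sketch of the delta the memory() comparator ranks pods by (the helper name here is hypothetical; the stats plumbing is omitted):

package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"

    v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
)

// memoryOverRequest returns usage minus the (now overhead-inclusive) memory
// request, the quantity the memory() comparator effectively sorts by; pods
// with the larger remaining delta are evicted first.
func memoryOverRequest(pod *v1.Pod, usage resource.Quantity) resource.Quantity {
    request := v1resource.GetResourceRequestQuantity(pod, v1.ResourceMemory)
    usage.Sub(request)
    return usage
}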