From 439a862bba60f05fb21338bfa02e547428c6ce0c Mon Sep 17 00:00:00 2001 From: Odin Ugedal Date: Thu, 25 Jul 2019 22:14:35 +0200 Subject: [PATCH 1/3] Add huge page usage stats to kubectl describe node --- .../pkg/describe/versioned/describe.go | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go index acf5ef125e1..2b6387c23a6 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go @@ -3527,13 +3527,32 @@ func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) + extResources := make([]string, 0, len(allocatable)) + hugePageResources := make([]string, 0, len(allocatable)) for resource := range allocatable { - if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { + if resourcehelper.IsHugePageResourceName(resource) { + hugePageResources = append(hugePageResources, string(resource)) + } else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { extResources = append(extResources, string(resource)) } } + sort.Strings(extResources) + sort.Strings(hugePageResources) + + for _, resource := range hugePageResources { + hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)] + fractionHugePageSizeRequests := float64(0) + 
fractionHugePageSizeLimits := float64(0) + if hugePageSizeAllocable.Value() != 0 { + fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + } + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits)) + } + for _, ext := range extResources { extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) From 28f8e52f320595fd434a2699850e57bc297474a4 Mon Sep 17 00:00:00 2001 From: Odin Ugedal Date: Sat, 14 Dec 2019 13:24:18 +0100 Subject: [PATCH 2/3] Add "kubectl describe node" resource tests --- .../pkg/describe/versioned/describe_test.go | 66 ++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go index bde4fc051c3..a2130edfcf9 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go @@ -3503,9 +3503,28 @@ Events: ` + "\n" } } +func getHugePageResourceList(pageSize, value string) corev1.ResourceList { + res := corev1.ResourceList{} + if pageSize != "" && value != "" { + res[corev1.ResourceName(corev1.ResourceHugePagesPrefix+pageSize)] = resource.MustParse(value) + } + return res +} func TestDescribeNode(t *testing.T) { holderIdentity := "holder" + nodeCapacity := corev1.ResourceList{} + for _, rl := range []corev1.ResourceList{getHugePageResourceList("2Mi", "4Gi"), getResourceList("8", "24Gi"), getHugePageResourceList("1Gi", "0")} { + for resource, value := range rl { + nodeCapacity[resource] = value + 
} + } + nodeAllocatable := corev1.ResourceList{} + for _, rl := range []corev1.ResourceList{getHugePageResourceList("2Mi", "2Gi"), getResourceList("4", "12Gi"), getHugePageResourceList("1Gi", "0")} { + for resource, value := range rl { + nodeAllocatable[resource] = value + } + } fake := fake.NewSimpleClientset( &corev1.Node{ @@ -3515,6 +3534,10 @@ func TestDescribeNode(t *testing.T) { Spec: corev1.NodeSpec{ Unschedulable: true, }, + Status: corev1.NodeStatus{ + Capacity: nodeCapacity, + Allocatable: nodeAllocatable, + }, }, &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ @@ -3527,6 +3550,38 @@ func TestDescribeNode(t *testing.T) { RenewTime: &metav1.MicroTime{Time: time.Now()}, }, }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-with-resources", + Namespace: "foo", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "cpu-mem", + Image: "image:latest", + Resources: corev1.ResourceRequirements{ + Requests: getResourceList("1", "1Gi"), + Limits: getResourceList("2", "2Gi"), + }, + }, + { + Name: "hugepages", + Image: "image:latest", + Resources: corev1.ResourceRequirements{ + Requests: getHugePageResourceList("2Mi", "512Mi"), + Limits: getHugePageResourceList("2Mi", "512Mi"), + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, ) c := &describeClient{T: t, Namespace: "foo", Interface: fake} d := NodeDescriber{c} @@ -3535,7 +3590,16 @@ func TestDescribeNode(t *testing.T) { t.Errorf("unexpected error: %v", err) } - expectedOut := []string{"Unschedulable", "true", "holder"} + expectedOut := []string{"Unschedulable", "true", "holder", + `Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) 
+ Resource Requests Limits + -------- -------- ------ + cpu 1 (25%) 2 (50%) + memory 1Gi (8%) 2Gi (16%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 512Mi (25%) 512Mi (25%)`} for _, expected := range expectedOut { if !strings.Contains(out, expected) { t.Errorf("expected to find %q in output: %q", expected, out) From 51bff87b18d6482f7383fb2151c7e483a1641ca7 Mon Sep 17 00:00:00 2001 From: Odin Ugedal Date: Sat, 14 Dec 2019 20:33:35 +0100 Subject: [PATCH 3/3] Add util function to merge resource lists --- .../pkg/describe/versioned/describe_test.go | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go index a2130edfcf9..b505aa3eb36 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go @@ -3511,20 +3511,30 @@ func getHugePageResourceList(pageSize, value string) corev1.ResourceList { return res } +// mergeResourceLists will merge resource lists. 
When two lists have the same resource, the value from +// the last list will be present in the result. +func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for _, rl := range resourceLists { + for resource, quantity := range rl { + result[resource] = quantity + } + } + return result +} + func TestDescribeNode(t *testing.T) { holderIdentity := "holder" - nodeCapacity := corev1.ResourceList{} - for _, rl := range []corev1.ResourceList{getHugePageResourceList("2Mi", "4Gi"), getResourceList("8", "24Gi"), getHugePageResourceList("1Gi", "0")} { - for resource, value := range rl { - nodeCapacity[resource] = value - } - } - nodeAllocatable := corev1.ResourceList{} - for _, rl := range []corev1.ResourceList{getHugePageResourceList("2Mi", "2Gi"), getResourceList("4", "12Gi"), getHugePageResourceList("1Gi", "0")} { - for resource, value := range rl { - nodeAllocatable[resource] = value - } - } + nodeCapacity := mergeResourceLists( + getHugePageResourceList("2Mi", "4Gi"), + getResourceList("8", "24Gi"), + getHugePageResourceList("1Gi", "0"), + ) + nodeAllocatable := mergeResourceLists( + getHugePageResourceList("2Mi", "2Gi"), + getResourceList("4", "12Gi"), + getHugePageResourceList("1Gi", "0"), + ) fake := fake.NewSimpleClientset( &corev1.Node{