diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go index dc3f838f008..61dac8c7cf2 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go @@ -3530,13 +3530,32 @@ func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) + extResources := make([]string, 0, len(allocatable)) + hugePageResources := make([]string, 0, len(allocatable)) for resource := range allocatable { - if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { + if resourcehelper.IsHugePageResourceName(resource) { + hugePageResources = append(hugePageResources, string(resource)) + } else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { extResources = append(extResources, string(resource)) } } + sort.Strings(extResources) + sort.Strings(hugePageResources) + + for _, resource := range hugePageResources { + hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)] + fractionHugePageSizeRequests := float64(0) + fractionHugePageSizeLimits := float64(0) + if hugePageSizeAllocable.Value() != 0 { + fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + } + w.Write(LEVEL_1, 
"%s\t%s (%d%%)\t%s (%d%%)\n", + resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits)) + } + for _, ext := range extResources { extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go index a01ea686bac..c70bccc58a7 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe_test.go @@ -3503,9 +3503,38 @@ Events: ` + "\n" } } +func getHugePageResourceList(pageSize, value string) corev1.ResourceList { + res := corev1.ResourceList{} + if pageSize != "" && value != "" { + res[corev1.ResourceName(corev1.ResourceHugePagesPrefix+pageSize)] = resource.MustParse(value) + } + return res +} + +// mergeResourceLists will merge resoure lists. 
When two lists have the same resource, the value from +// the last list will be present in the result +func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for _, rl := range resourceLists { + for resource, quantity := range rl { + result[resource] = quantity + } + } + return result +} func TestDescribeNode(t *testing.T) { holderIdentity := "holder" + nodeCapacity := mergeResourceLists( + getHugePageResourceList("2Mi", "4Gi"), + getResourceList("8", "24Gi"), + getHugePageResourceList("1Gi", "0"), + ) + nodeAllocatable := mergeResourceLists( + getHugePageResourceList("2Mi", "2Gi"), + getResourceList("4", "12Gi"), + getHugePageResourceList("1Gi", "0"), + ) fake := fake.NewSimpleClientset( &corev1.Node{ @@ -3515,6 +3544,10 @@ func TestDescribeNode(t *testing.T) { Spec: corev1.NodeSpec{ Unschedulable: true, }, + Status: corev1.NodeStatus{ + Capacity: nodeCapacity, + Allocatable: nodeAllocatable, + }, }, &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ @@ -3527,6 +3560,38 @@ func TestDescribeNode(t *testing.T) { RenewTime: &metav1.MicroTime{Time: time.Now()}, }, }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-with-resources", + Namespace: "foo", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "cpu-mem", + Image: "image:latest", + Resources: corev1.ResourceRequirements{ + Requests: getResourceList("1", "1Gi"), + Limits: getResourceList("2", "2Gi"), + }, + }, + { + Name: "hugepages", + Image: "image:latest", + Resources: corev1.ResourceRequirements{ + Requests: getHugePageResourceList("2Mi", "512Mi"), + Limits: getHugePageResourceList("2Mi", "512Mi"), + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, ) c := &describeClient{T: t, Namespace: "foo", Interface: fake} d := NodeDescriber{c} @@ -3535,7 +3600,16 @@ func TestDescribeNode(t *testing.T) { t.Errorf("unexpected error: 
%v", err) } - expectedOut := []string{"Unschedulable", "true", "holder"} + expectedOut := []string{"Unschedulable", "true", "holder", + `Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 1 (25%) 2 (50%) + memory 1Gi (8%) 2Gi (16%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 512Mi (25%) 512Mi (25%)`} for _, expected := range expectedOut { if !strings.Contains(out, expected) { t.Errorf("expected to find %q in output: %q", expected, out)