Mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #80605 from odinuge/hugetlb-kubectl-node-describe

Add huge page usage stats (Allocated resources) to kubectl describe node

Commit 5aa2e3ca06
@@ -3530,13 +3530,32 @@ func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev
         corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
     w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
         corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits))

     extResources := make([]string, 0, len(allocatable))
+    hugePageResources := make([]string, 0, len(allocatable))
     for resource := range allocatable {
-        if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods {
+        if resourcehelper.IsHugePageResourceName(resource) {
+            hugePageResources = append(hugePageResources, string(resource))
+        } else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods {
             extResources = append(extResources, string(resource))
         }
     }

     sort.Strings(extResources)
+    sort.Strings(hugePageResources)

+    for _, resource := range hugePageResources {
+        hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)]
+        fractionHugePageSizeRequests := float64(0)
+        fractionHugePageSizeLimits := float64(0)
+        if hugePageSizeAllocable.Value() != 0 {
+            fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100
+            fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100
+        }
+        w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
+            resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits))
+    }
+
     for _, ext := range extResources {
         extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)]
         w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String())
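For intuition, the new hugepages rows reuse the same request/limit percentage math as the cpu and memory rows above. Below is a minimal standalone sketch of that math with hypothetical quantities, assuming only k8s.io/apimachinery's resource package:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Hypothetical node: 2Gi of hugepages-2Mi allocatable, 512Mi requested.
    allocatable := resource.MustParse("2Gi")
    requests := resource.MustParse("512Mi")

    fraction := float64(0)
    if allocatable.Value() != 0 { // same divide-by-zero guard as the patch
        fraction = float64(requests.Value()) / float64(allocatable.Value()) * 100
    }
    // Renders like one cell of the describe table: "512Mi (25%)"
    fmt.Printf("%s (%d%%)\n", requests.String(), int64(fraction))
}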
@@ -3503,9 +3503,38 @@ Events: <none>` + "\n"
     }
 }

+func getHugePageResourceList(pageSize, value string) corev1.ResourceList {
+    res := corev1.ResourceList{}
+    if pageSize != "" && value != "" {
+        res[corev1.ResourceName(corev1.ResourceHugePagesPrefix+pageSize)] = resource.MustParse(value)
+    }
+    return res
+}
+
+// mergeResourceLists will merge resource lists. When two lists have the same resource,
+// the value from the last list will be present in the result.
+func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList {
+    result := corev1.ResourceList{}
+    for _, rl := range resourceLists {
+        for resource, quantity := range rl {
+            result[resource] = quantity
+        }
+    }
+    return result
+}
+
 func TestDescribeNode(t *testing.T) {
     holderIdentity := "holder"
+    nodeCapacity := mergeResourceLists(
+        getHugePageResourceList("2Mi", "4Gi"),
+        getResourceList("8", "24Gi"),
+        getHugePageResourceList("1Gi", "0"),
+    )
+    nodeAllocatable := mergeResourceLists(
+        getHugePageResourceList("2Mi", "2Gi"),
+        getResourceList("4", "12Gi"),
+        getHugePageResourceList("1Gi", "0"),
+    )
+
     fake := fake.NewSimpleClientset(
         &corev1.Node{
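mergeResourceLists simply layers the given lists; in the fixtures above the inputs are disjoint, but the comment's override rule matters whenever a resource name repeats. A self-contained sketch of that rule, with hypothetical quantities and the standard k8s.io/api and k8s.io/apimachinery imports:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// mergeResourceLists mirrors the test helper above: later lists override
// earlier ones when they carry the same resource name.
func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList {
    result := corev1.ResourceList{}
    for _, rl := range resourceLists {
        for name, quantity := range rl {
            result[name] = quantity
        }
    }
    return result
}

func main() {
    merged := mergeResourceLists(
        corev1.ResourceList{"hugepages-2Mi": resource.MustParse("4Gi")},
        corev1.ResourceList{"hugepages-2Mi": resource.MustParse("2Gi")}, // last list wins
    )
    q := merged["hugepages-2Mi"]
    fmt.Println(q.String()) // prints "2Gi"
}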
@@ -3515,6 +3544,10 @@ func TestDescribeNode(t *testing.T) {
             Spec: corev1.NodeSpec{
                 Unschedulable: true,
             },
+            Status: corev1.NodeStatus{
+                Capacity:    nodeCapacity,
+                Allocatable: nodeAllocatable,
+            },
         },
         &coordinationv1.Lease{
             ObjectMeta: metav1.ObjectMeta{
@@ -3527,6 +3560,38 @@ func TestDescribeNode(t *testing.T) {
                 RenewTime: &metav1.MicroTime{Time: time.Now()},
             },
         },
+        &corev1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "pod-with-resources",
+                Namespace: "foo",
+            },
+            TypeMeta: metav1.TypeMeta{
+                Kind: "Pod",
+            },
+            Spec: corev1.PodSpec{
+                Containers: []corev1.Container{
+                    {
+                        Name:  "cpu-mem",
+                        Image: "image:latest",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getResourceList("1", "1Gi"),
+                            Limits:   getResourceList("2", "2Gi"),
+                        },
+                    },
+                    {
+                        Name:  "hugepages",
+                        Image: "image:latest",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getHugePageResourceList("2Mi", "512Mi"),
+                            Limits:   getHugePageResourceList("2Mi", "512Mi"),
+                        },
+                    },
+                },
+            },
+            Status: corev1.PodStatus{
+                Phase: corev1.PodRunning,
+            },
+        },
     )
     c := &describeClient{T: t, Namespace: "foo", Interface: fake}
     d := NodeDescriber{c}
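Worth noting for the assertions below: describeNodeResource sums requests and limits over the node's non-terminated pods, so this single fixture pod contributes cpu 1 and memory 1Gi from its cpu-mem container plus hugepages-2Mi 512Mi from its hugepages container.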
@@ -3535,7 +3600,16 @@ func TestDescribeNode(t *testing.T) {
         t.Errorf("unexpected error: %v", err)
     }

-    expectedOut := []string{"Unschedulable", "true", "holder"}
+    expectedOut := []string{"Unschedulable", "true", "holder",
+        `Allocated resources:
+  (Total limits may be over 100 percent, i.e., overcommitted.)
+  Resource           Requests     Limits
+  --------           --------     ------
+  cpu                1 (25%)      2 (50%)
+  memory             1Gi (8%)     2Gi (16%)
+  ephemeral-storage  0 (0%)       0 (0%)
+  hugepages-1Gi      0 (0%)       0 (0%)
+  hugepages-2Mi      512Mi (25%)  512Mi (25%)`}
     for _, expected := range expectedOut {
         if !strings.Contains(out, expected) {
             t.Errorf("expected to find %q in output: %q", expected, out)
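The expected percentages follow directly from the fixtures: denominators come from nodeAllocatable (4 CPU, 12Gi memory, 2Gi of hugepages-2Mi), not capacity, so cpu shows 1/4 = 25% requests and 2/4 = 50% limits; memory shows 1Gi/12Gi, truncated from 8.3% to 8% by the int64 conversion (limits 2Gi/12Gi give 16%); hugepages-2Mi shows 512Mi/2Gi = 25%; and hugepages-1Gi has zero allocatable, so the divide-by-zero guard in describeNodeResource reports 0 (0%), as does the unset ephemeral-storage.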