Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00

Merge pull request #130577 from KevinTMtz/pod-level-hugepages

[PodLevelResources] Pod Level Hugepage Resources

Commit 838f3c0852
@@ -27,6 +27,7 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     resourcehelper "k8s.io/component-helpers/resource"
     "k8s.io/kubernetes/pkg/api/v1/service"
+    corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/util/parsers"
 )
@@ -196,6 +197,7 @@ func SetDefaults_Pod(obj *v1.Pod) {
     // Pod Requests default values must be applied after container-level default values
     // have been populated.
     if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) {
+        defaultHugePagePodLimits(obj)
         defaultPodRequests(obj)
     }

@@ -453,7 +455,9 @@ func defaultPodRequests(obj *v1.Pod) {
     // PodLevelResources feature) and pod-level requests are not set, the pod-level requests
     // default to the effective requests of all the containers for that resource.
     for key, aggrCtrLim := range aggrCtrReqs {
-        if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
+        // Defaulting for pod level hugepages requests takes them directly from the pod limit;
+        // hugepages cannot be overcommitted and must have the limit, so we skip them here.
+        if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) && !corev1helper.IsHugePageResourceName(key) {
             podReqs[key] = aggrCtrLim.DeepCopy()
         }
     }
@@ -461,6 +465,8 @@ func defaultPodRequests(obj *v1.Pod) {
     // When no containers specify requests for a resource, the pod-level requests
     // will default to match the pod-level limits, if pod-level
     // limits exist for that resource.
+    // Defaulting for pod level hugepages requests is dependent on defaultHugePagePodLimits:
+    // if defaultHugePagePodLimits defined the limit, the request will be set here.
     for key, podLim := range obj.Spec.Resources.Limits {
         if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
             podReqs[key] = podLim.DeepCopy()
@@ -473,3 +479,44 @@ func defaultPodRequests(obj *v1.Pod) {
         obj.Spec.Resources.Requests = podReqs
     }
 }
+
+// defaultHugePagePodLimits applies default values for pod-level limits only when
+// container hugepage limits are set, but not at pod level, in the following
+// scenario:
+//  1. When at least one container (regular, init or sidecar) has hugepage
+//     limits set:
+//     The pod-level limit becomes equal to the aggregated hugepages limit of all
+//     the containers in the pod.
+func defaultHugePagePodLimits(obj *v1.Pod) {
+    // We only populate defaults when the pod-level resources are partly specified already.
+    if obj.Spec.Resources == nil {
+        return
+    }
+
+    if len(obj.Spec.Resources.Limits) == 0 && len(obj.Spec.Resources.Requests) == 0 {
+        return
+    }
+
+    var podLims v1.ResourceList
+    podLims = obj.Spec.Resources.Limits
+    if podLims == nil {
+        podLims = make(v1.ResourceList)
+    }
+
+    aggrCtrLims := resourcehelper.AggregateContainerLimits(obj, resourcehelper.PodResourcesOptions{})
+
+    // When containers specify limits for hugepages and pod-level limits are not
+    // set for that resource, the pod-level limit will default to the aggregated
+    // hugepages limit of all the containers.
+    for key, aggrCtrLim := range aggrCtrLims {
+        if _, exists := podLims[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) && corev1helper.IsHugePageResourceName(key) {
+            podLims[key] = aggrCtrLim.DeepCopy()
+        }
+    }
+
+    // Only set pod-level resource limits in the PodSpec if the requirements map
+    // contains entries after collecting container-level limits and pod-level limits for hugepages.
+    if len(podLims) > 0 {
+        obj.Spec.Resources.Limits = podLims
+    }
+}
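Read together with defaultPodRequests above, the effect is: container-level hugepage limits are summed per page size and promoted to the pod level, and the pod-level request is then copied from that limit. Below is a minimal, self-contained sketch of that aggregation rule; aggregateHugePageLimits is a hypothetical stand-in for resourcehelper.AggregateContainerLimits, which the real code uses, and the quantities are illustrative.

    package main

    import (
        "fmt"
        "strings"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // aggregateHugePageLimits sums container hugepage limits per page size,
    // mirroring what defaultHugePagePodLimits derives for the pod level.
    func aggregateHugePageLimits(containers []v1.Container) v1.ResourceList {
        out := v1.ResourceList{}
        for _, c := range containers {
            for name, q := range c.Resources.Limits {
                if !strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) {
                    continue // only hugepages-<size> resources participate
                }
                sum := out[name].DeepCopy()
                sum.Add(q)
                out[name] = sum
            }
        }
        return out
    }

    func main() {
        containers := []v1.Container{
            {Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
            }}},
            {Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
            }}},
        }
        agg := aggregateHugePageLimits(containers)
        q := agg[v1.ResourceHugePagesPrefix+"2Mi"]
        fmt.Println(q.String()) // 6Mi becomes the defaulted pod-level limit and request
    }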
@@ -1222,6 +1222,298 @@ func TestPodResourcesDefaults(t *testing.T) {
                 },
             },
         },
+    }, {
+        name: "pod hugepages requests=unset limits=unset, container hugepages requests=unset limits=set",
+        podLevelResourcesEnabled: true,
+        podResources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("2m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                    },
+                },
+            }, {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("1m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                    },
+                },
+            },
+        },
+        expectedPodSpec: v1.PodSpec{
+            Resources: &v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    "cpu": resource.MustParse("3m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("6Mi"),
+                },
+                Limits: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("6Mi"),
+                },
+            },
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                    },
+                }, {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                    },
+                },
+            },
+        },
+    }, {
+        name: "pod hugepages requests=unset limits=set, container hugepages requests=unset limits=set",
+        podLevelResourcesEnabled: true,
+        podResources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("2m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                    },
+                },
+            }, {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("1m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                    },
+                },
+            },
+        },
+        expectedPodSpec: v1.PodSpec{
+            Resources: &v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    "cpu": resource.MustParse("3m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+                Limits: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+            },
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                    },
+                }, {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                    },
+                },
+            },
+        },
+    }, {
+        name: "pod hugepages requests=set limits=set, container hugepages requests=unset limits=set",
+        podLevelResourcesEnabled: true,
+        podResources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+            Requests: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("2m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                    },
+                },
+            }, {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("1m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                    },
+                },
+            },
+        },
+        expectedPodSpec: v1.PodSpec{
+            Resources: &v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+                Limits: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+            },
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("4Mi"),
+                        },
+                    },
+                }, {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                    },
+                },
+            },
+        },
+    }, {
+        name: "pod hugepages requests=set limits=set, container hugepages requests=unset limits=unset",
+        podLevelResourcesEnabled: true,
+        podResources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+            Requests: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{},
+            },
+        },
+        expectedPodSpec: v1.PodSpec{
+            Resources: &v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+                Limits: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                },
+            },
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{},
+                },
+            },
+        },
+    }, {
+        name: "pod hugepages requests=unset limits=set, container hugepages requests=unset limits=set different hugepagesizes between pod and container level",
+        podLevelResourcesEnabled: true,
+        podResources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                "cpu": resource.MustParse("5m"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("2m"),
+                        v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                    },
+                },
+            }, {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        "cpu": resource.MustParse("1m"),
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                    },
+                },
+            },
+        },
+        expectedPodSpec: v1.PodSpec{
+            Resources: &v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    "cpu": resource.MustParse("3m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                    v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                },
+                Limits: v1.ResourceList{
+                    "cpu": resource.MustParse("5m"),
+                    v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                    v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                },
+            },
+            Containers: []v1.Container{
+                {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("2m"),
+                            v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                        },
+                    },
+                }, {
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                        Limits: v1.ResourceList{
+                            "cpu": resource.MustParse("1m"),
+                            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                        },
+                    },
+                },
+            },
+        },
     }}

     for _, tc := range cases {
@@ -19610,7 +19610,7 @@ func TestValidatePodResourceConsistency(t *testing.T) {
         },
     },
 }, {
-    name: "indivdual container limits greater than pod limits",
+    name: "individual container limits greater than pod limits",
     podResources: core.ResourceRequirements{
         Limits: core.ResourceList{
             core.ResourceCPU: resource.MustParse("10"),
@@ -19670,6 +19670,8 @@ func TestValidatePodResourceNames(t *testing.T) {
     {"memory", false},
     {"cpu", false},
     {"storage", true},
+    {v1.ResourceHugePagesPrefix + "2Mi", false},
+    {v1.ResourceHugePagesPrefix + "1Gi", false},
     {"requests.cpu", true},
     {"requests.memory", true},
     {"requests.storage", true},
@@ -19714,6 +19716,8 @@ func TestValidateResourceNames(t *testing.T) {
     {"memory", true, ""},
     {"cpu", true, ""},
     {"storage", true, ""},
+    {v1.ResourceHugePagesPrefix + "2Mi", true, ""},
+    {v1.ResourceHugePagesPrefix + "1Gi", true, ""},
     {"requests.cpu", true, ""},
     {"requests.memory", true, ""},
     {"requests.storage", true, ""},
@@ -24301,6 +24305,48 @@ func TestValidateResourceRequirements(t *testing.T) {
         },
     },
     validateFn: ValidateContainerResourceRequirements,
+}, {
+    name: "container resource hugepage with cpu or memory",
+    requirements: core.ResourceRequirements{
+        Limits: core.ResourceList{
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+            core.ResourceCPU: resource.MustParse("10"),
+        },
+        Requests: core.ResourceList{
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+        },
+    },
+    validateFn: ValidateContainerResourceRequirements,
+}, {
+    name: "container resource hugepage limit without request",
+    requirements: core.ResourceRequirements{
+        Limits: core.ResourceList{
+            core.ResourceMemory: resource.MustParse("2Mi"),
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+        },
+    },
+    validateFn: ValidateContainerResourceRequirements,
+}, {
+    name: "pod resource hugepages with cpu or memory",
+    requirements: core.ResourceRequirements{
+        Limits: core.ResourceList{
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+        },
+        Requests: core.ResourceList{
+            core.ResourceMemory: resource.MustParse("2Mi"),
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+        },
+    },
+    validateFn: validatePodResourceRequirements,
+}, {
+    name: "pod resource hugepages limit without request",
+    requirements: core.ResourceRequirements{
+        Limits: core.ResourceList{
+            core.ResourceMemory: resource.MustParse("2Mi"),
+            core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+        },
+    },
+    validateFn: validatePodResourceRequirements,
 }, {
     name: "limits and requests of memory resource are equal",
     requirements: core.ResourceRequirements{
@@ -24363,62 +24409,81 @@ func TestValidateResourceRequirements(t *testing.T) {
         validateFn func(requirements *core.ResourceRequirements,
             podClaimNames sets.Set[string], fldPath *field.Path,
             opts PodValidationOptions) field.ErrorList
-    }{{
-        name: "hugepage resource without cpu or memory",
-        requirements: core.ResourceRequirements{
-            Limits: core.ResourceList{
-                core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
-            },
-            Requests: core.ResourceList{
-                core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
-            },
-        },
-        validateFn: ValidateContainerResourceRequirements,
-    }, {
-        name: "pod resource with hugepages",
-        requirements: core.ResourceRequirements{
-            Limits: core.ResourceList{
-                core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
-            },
-            Requests: core.ResourceList{
-                core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
-            },
-        },
-        validateFn: validatePodResourceRequirements,
-    }, {
-        name: "pod resource with ephemeral-storage",
-        requirements: core.ResourceRequirements{
-            Limits: core.ResourceList{
-                core.ResourceName(core.ResourceEphemeralStorage): resource.MustParse("2Mi"),
-            },
-            Requests: core.ResourceList{
-                core.ResourceName(core.ResourceEphemeralStorage + "2Mi"): resource.MustParse("2Mi"),
-            },
-        },
-        validateFn: validatePodResourceRequirements,
-    }, {
-        name: "pod resource with unsupported prefixed resources",
-        requirements: core.ResourceRequirements{
-            Limits: core.ResourceList{
-                core.ResourceName("kubernetesio/" + core.ResourceCPU): resource.MustParse("2"),
-            },
-            Requests: core.ResourceList{
-                core.ResourceName("kubernetesio/" + core.ResourceMemory): resource.MustParse("2"),
-            },
-        },
-        validateFn: validatePodResourceRequirements,
-    }, {
-        name: "pod resource with unsupported native resources",
-        requirements: core.ResourceRequirements{
-            Limits: core.ResourceList{
-                core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
-            },
-            Requests: core.ResourceList{
-                core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
-            },
-        },
-        validateFn: validatePodResourceRequirements,
-    },
+    }{
+        {
+            name: "container resource hugepage without cpu or memory",
+            requirements: core.ResourceRequirements{
+                Limits: core.ResourceList{
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+                Requests: core.ResourceList{
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+            },
+            validateFn: ValidateContainerResourceRequirements,
+        }, {
+            name: "container resource hugepage without limit",
+            requirements: core.ResourceRequirements{
+                Requests: core.ResourceList{
+                    core.ResourceMemory: resource.MustParse("2Mi"),
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+            },
+            validateFn: ValidateContainerResourceRequirements,
+        }, {
+            name: "pod resource hugepages without cpu or memory",
+            requirements: core.ResourceRequirements{
+                Limits: core.ResourceList{
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+                Requests: core.ResourceList{
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+            },
+            validateFn: validatePodResourceRequirements,
+        }, {
+            name: "pod resource hugepages request without limit",
+            requirements: core.ResourceRequirements{
+                Requests: core.ResourceList{
+                    core.ResourceMemory: resource.MustParse("2Mi"),
+                    core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
+                },
+            },
+            validateFn: validatePodResourceRequirements,
+        }, {
+            name: "pod resource with ephemeral-storage",
+            requirements: core.ResourceRequirements{
+                Limits: core.ResourceList{
+                    core.ResourceName(core.ResourceEphemeralStorage): resource.MustParse("2Mi"),
+                },
+                Requests: core.ResourceList{
+                    core.ResourceName(core.ResourceEphemeralStorage + "2Mi"): resource.MustParse("2Mi"),
+                },
+            },
+            validateFn: validatePodResourceRequirements,
+        }, {
+            name: "pod resource with unsupported prefixed resources",
+            requirements: core.ResourceRequirements{
+                Limits: core.ResourceList{
+                    core.ResourceName("kubernetesio/" + core.ResourceCPU): resource.MustParse("2"),
+                },
+                Requests: core.ResourceList{
+                    core.ResourceName("kubernetesio/" + core.ResourceMemory): resource.MustParse("2"),
+                },
+            },
+            validateFn: validatePodResourceRequirements,
+        }, {
+            name: "pod resource with unsupported native resources",
+            requirements: core.ResourceRequirements{
+                Limits: core.ResourceList{
+                    core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
+                },
+                Requests: core.ResourceList{
+                    core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
+                },
+            },
+            validateFn: validatePodResourceRequirements,
+        },
     {
         name: "pod resource with unsupported empty native resource name",
         requirements: core.ResourceRequirements{
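Taken together, the success and error cases above encode two rules for hugepages under pod-level resources: a hugepages entry must be accompanied by cpu or memory, and a hugepages request is only valid when the matching limit is present (a limit without a request is fine, since the request is defaulted). A sketch of a requirements stanza that passes both checks, written with the public v1 types rather than the internal core package and with illustrative quantities:

    reqs := v1.ResourceRequirements{
        Limits: v1.ResourceList{
            v1.ResourceMemory:                  resource.MustParse("128Mi"),
            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
        },
        Requests: v1.ResourceList{
            v1.ResourceMemory:                  resource.MustParse("128Mi"),
            v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
        },
    }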
@@ -543,6 +543,45 @@ func TestEnoughRequests(t *testing.T) {
             ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
         },
     },
+    {
+        podLevelResourcesEnabled: true,
+        pod: newPodLevelResourcesPod(
+            newResourcePod(),
+            v1.ResourceRequirements{
+                Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("2"), hugePageResourceA: *resource.NewQuantity(5, resource.BinarySI)},
+            },
+        ),
+        nodeInfo: framework.NewNodeInfo(),
+        name: "pod-level hugepages resource fit",
+        wantInsufficientResources: []InsufficientResource{},
+    },
+    {
+        podLevelResourcesEnabled: true,
+        pod: newPodLevelResourcesPod(
+            newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
+            v1.ResourceRequirements{
+                Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("2"), hugePageResourceA: *resource.NewQuantity(5, resource.BinarySI)},
+            },
+        ),
+        nodeInfo: framework.NewNodeInfo(),
+        name: "both pod-level and container-level hugepages resource fit",
+        wantInsufficientResources: []InsufficientResource{},
+    },
+    {
+        podLevelResourcesEnabled: true,
+        pod: newPodLevelResourcesPod(
+            newResourcePod(),
+            v1.ResourceRequirements{
+                Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("2"), hugePageResourceA: *resource.NewQuantity(10, resource.BinarySI)},
+            },
+        ),
+        nodeInfo: framework.NewNodeInfo(),
+        name: "pod-level hugepages resource not fit",
+        wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
+        wantInsufficientResources: []InsufficientResource{
+            {ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
+        },
+    },
     {
         podLevelResourcesEnabled: true,
         pod: newResourceInitPod(newPodLevelResourcesPod(
@@ -1547,8 +1586,25 @@ func TestIsFit(t *testing.T) {
         pod: st.MakePod().Resources(
             v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}},
         ).Obj(),
         node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
-        expected: true,
+        podLevelResourcesEnabled: true,
+        expected: true,
+    },
+    "sufficient pod-level resource hugepages": {
+        pod: st.MakePod().Resources(
+            v1.ResourceRequirements{Requests: v1.ResourceList{hugePageResourceA: resource.MustParse("2Mi")}},
+        ).Obj(),
+        node: st.MakeNode().Capacity(map[v1.ResourceName]string{hugePageResourceA: "2Mi"}).Obj(),
+        podLevelResourcesEnabled: true,
+        expected: true,
+    },
+    "insufficient pod-level resource hugepages": {
+        pod: st.MakePod().Resources(
+            v1.ResourceRequirements{Requests: v1.ResourceList{hugePageResourceA: resource.MustParse("4Mi")}},
+        ).Obj(),
+        node: st.MakeNode().Capacity(map[v1.ResourceName]string{hugePageResourceA: "2Mi"}).Obj(),
+        podLevelResourcesEnabled: true,
+        expected: false,
     },
 }

@@ -21,6 +21,7 @@ import (
     "os"
     "path/filepath"

+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/util/swap"

     "k8s.io/klog/v2"
@@ -32,8 +33,8 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    resourcehelper "k8s.io/component-helpers/resource"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/cm"
     usernamespacefeature "k8s.io/kubernetes/pkg/kubelet/userns"
     "k8s.io/kubernetes/pkg/volume"
@@ -83,7 +84,7 @@ func (plugin *emptyDirPlugin) GetPluginName() string {
 func (plugin *emptyDirPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
     volumeSource, _ := getVolumeSource(spec)
     if volumeSource == nil {
-        return "", fmt.Errorf("Spec does not reference an EmptyDir volume type")
+        return "", fmt.Errorf("spec does not reference an emptyDir volume type")
     }

     // Return user defined volume name, since this is an ephemeral volume type
@@ -405,10 +406,19 @@ func getPageSizeMountOption(medium v1.StorageMedium, pod *v1.Pod) (string, error
         }
     }

+    podLevelAndContainerLevelRequests := []v1.ResourceList{}
+    if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
+        podLevelAndContainerLevelRequests = append(podLevelAndContainerLevelRequests, pod.Spec.Resources.Requests)
+    }
+
     // In some rare cases init containers can also consume Huge pages
     for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
-        // We can take request because limit and requests must match.
-        for requestName := range container.Resources.Requests {
+        podLevelAndContainerLevelRequests = append(podLevelAndContainerLevelRequests, container.Resources.Requests)
+    }
+
+    // We can take request because limit and requests must match.
+    for _, resourceList := range podLevelAndContainerLevelRequests {
+        for requestName := range resourceList {
             if !v1helper.IsHugePageResourceName(requestName) {
                 continue
             }
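The reshaped loop treats the pod-level request list and every container request list uniformly. Here is a simplified, self-contained sketch of the resulting control flow; podLevelEnabled stands in for the feature-gate lookup, and error handling plus the medium checks are elided:

    package sketch

    import (
        "strings"

        v1 "k8s.io/api/core/v1"
    )

    // hugePageSizesRequested gathers each distinct hugepages-<size> resource
    // named by the pod-level requests or any container's requests. The caller
    // must reject pods naming more than one size unless the volume medium
    // already pins the page size (e.g. "HugePages-2Mi").
    func hugePageSizesRequested(pod *v1.Pod, podLevelEnabled bool) map[v1.ResourceName]bool {
        lists := []v1.ResourceList{}
        if podLevelEnabled && pod.Spec.Resources != nil {
            lists = append(lists, pod.Spec.Resources.Requests)
        }
        for _, c := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
            lists = append(lists, c.Resources.Requests)
        }

        sizes := map[v1.ResourceName]bool{}
        for _, rl := range lists {
            for name := range rl {
                if strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) {
                    sizes[name] = true
                }
            }
        }
        return sizes
    }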
@@ -438,7 +448,6 @@ func getPageSizeMountOption(medium v1.StorageMedium, pod *v1.Pod) (string, error
     }

     return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize.String()), nil
-
 }

 // setupDir creates the directory with the default permissions specified by the perm constant.
@@ -26,6 +26,9 @@ import (
     "strings"
     "testing"

+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/util/swap"

     v1 "k8s.io/api/core/v1"
@@ -373,10 +376,11 @@ func TestMetrics(t *testing.T) {

 func TestGetHugePagesMountOptions(t *testing.T) {
     testCases := map[string]struct {
         pod *v1.Pod
         medium v1.StorageMedium
         shouldFail bool
         expectedResult string
+        podLevelResourcesEnabled bool
     }{
         "ProperValues": {
             pod: &v1.Pod{
@@ -605,10 +609,124 @@ func TestGetHugePagesMountOptions(t *testing.T) {
             shouldFail: true,
             expectedResult: "",
         },
+        "PodLevelResourcesSinglePageSize": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePages,
+            shouldFail: false,
+            expectedResult: "pagesize=2Mi",
+        },
+        "PodLevelResourcesSinglePageSizeMediumPrefix": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePagesPrefix + "2Mi",
+            shouldFail: false,
+            expectedResult: "pagesize=2Mi",
+        },
+        "PodLevelResourcesMultiPageSize": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+                            v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePages,
+            shouldFail: true,
+            expectedResult: "",
+        },
+        "PodLevelResourcesMultiPageSizeMediumPrefix": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+                            v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePagesPrefix + "2Mi",
+            shouldFail: false,
+            expectedResult: "pagesize=2Mi",
+        },
+        "PodAndContainerLevelResourcesMultiPageSizeHugePagesMedium": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+                        },
+                    },
+                    Containers: []v1.Container{
+                        {
+                            Resources: v1.ResourceRequirements{
+                                Requests: v1.ResourceList{
+                                    v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePages,
+            shouldFail: true,
+            expectedResult: "",
+        },
+        "PodAndContainerLevelResourcesMultiPageSizeHugePagesMediumPrefix": {
+            podLevelResourcesEnabled: true,
+            pod: &v1.Pod{
+                Spec: v1.PodSpec{
+                    Resources: &v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+                        },
+                    },
+                    Containers: []v1.Container{
+                        {
+                            Resources: v1.ResourceRequirements{
+                                Requests: v1.ResourceList{
+                                    v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+            medium: v1.StorageMediumHugePagesPrefix + "2Mi",
+            shouldFail: false,
+            expectedResult: "pagesize=2Mi",
+        },
     }

     for testCaseName, testCase := range testCases {
         t.Run(testCaseName, func(t *testing.T) {
+            if testCase.podLevelResourcesEnabled {
+                featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, true)
+            }
+
             value, err := getPageSizeMountOption(testCase.medium, testCase.pod)
             if testCase.shouldFail && err == nil {
                 t.Errorf("%s: Unexpected success", testCaseName)
@@ -17,6 +17,8 @@ limitations under the License.
 package resource

 import (
+    "strings"
+
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/sets"
 )
@@ -56,14 +58,14 @@ type PodResourcesOptions struct {
 var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory)

 func SupportedPodLevelResources() sets.Set[v1.ResourceName] {
-    return supportedPodLevelResources
+    return supportedPodLevelResources.Clone().Insert(v1.ResourceHugePagesPrefix)
 }

 // IsSupportedPodLevelResources checks if a given resource is supported by pod-level
 // resource management through the PodLevelResources feature. Returns true if
 // the resource is supported.
 func IsSupportedPodLevelResource(name v1.ResourceName) bool {
-    return supportedPodLevelResources.Has(name)
+    return supportedPodLevelResources.Has(name) || strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
 }

 // IsPodLevelResourcesSet check if PodLevelResources pod-level resources are set.
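A quick check of the two changed return statements, assuming the resourcehelper alias used elsewhere in this change:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        resourcehelper "k8s.io/component-helpers/resource"
    )

    func main() {
        fmt.Println(resourcehelper.IsSupportedPodLevelResource(v1.ResourceCPU))                     // true
        fmt.Println(resourcehelper.IsSupportedPodLevelResource(v1.ResourceEphemeralStorage))        // false
        fmt.Println(resourcehelper.IsSupportedPodLevelResource(v1.ResourceHugePagesPrefix + "2Mi")) // true, via the prefix match
    }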
@@ -1800,6 +1800,118 @@ func TestPodLevelResourceRequests(t *testing.T) {
         opts: PodResourcesOptions{SkipPodLevelResources: false},
         expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("15Mi"), v1.ResourceCPU: resource.MustParse("18m")},
     },
+    {
+        name: "pod-level resources, hugepage request/limit single page size",
+        podResources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+            },
+            Requests: v1.ResourceList{
+                v1.ResourceMemory: resource.MustParse("10Mi"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+            },
+        },
+        opts: PodResourcesOptions{SkipPodLevelResources: false},
+        expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("10Mi"), v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi")},
+    },
+    {
+        name: "pod-level resources, hugepage request/limit multiple page sizes",
+        podResources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+            },
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("1"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"),
+                v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+            },
+        },
+        opts: PodResourcesOptions{SkipPodLevelResources: false},
+        expectedRequests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("2Mi"), v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi")},
+    },
+    {
+        name: "pod-level resources, container-level resources, hugepage request/limit single page size",
+        podResources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("1"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("6Mi"),
+                    },
+                    Requests: v1.ResourceList{
+                        v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("6Mi"),
+                    },
+                },
+            },
+        },
+        opts: PodResourcesOptions{SkipPodLevelResources: false},
+        expectedRequests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi")},
+    },
+    {
+        name: "pod-level resources, container-level resources, hugepage request/limit multiple page sizes",
+        podResources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("2Gi"),
+            },
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("1"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+                v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("2Gi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("2Gi"),
+                    },
+                    Requests: v1.ResourceList{
+                        v1.ResourceCPU: resource.MustParse("1"),
+                        v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("2Gi"),
+                    },
+                },
+            },
+        },
+        opts: PodResourcesOptions{SkipPodLevelResources: false},
+        expectedRequests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"), v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("2Gi")},
+    },
+    {
+        name: "pod-level resources, container-level resources, hugepage request/limit multiple page sizes between pod-level and container-level",
+        podResources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+            Requests: v1.ResourceList{
+                v1.ResourceCPU: resource.MustParse("1"),
+                v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"),
+            },
+        },
+        containers: []v1.Container{
+            {
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{
+                        v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                    },
+                    Requests: v1.ResourceList{
+                        v1.ResourceMemory: resource.MustParse("4Mi"),
+                        v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi"),
+                    },
+                },
+            },
+        },
+        opts: PodResourcesOptions{SkipPodLevelResources: false},
+        expectedRequests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("4Mi"), v1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("10Mi"), v1.ResourceHugePagesPrefix + "1Gi": resource.MustParse("1Gi")},
+    },
 }
 for _, tc := range testCases {
     t.Run(tc.name, func(t *testing.T) {
@@ -1811,6 +1923,47 @@ func TestPodLevelResourceRequests(t *testing.T) {
         }
     }
 }
+
+func TestIsSupportedPodLevelResource(t *testing.T) {
+    testCases := []struct {
+        name string
+        resource v1.ResourceName
+        expected bool
+    }{
+        {
+            name: v1.ResourceCPU.String(),
+            resource: v1.ResourceCPU,
+            expected: true,
+        },
+        {
+            name: v1.ResourceMemory.String(),
+            resource: v1.ResourceMemory,
+            expected: true,
+        },
+        {
+            name: v1.ResourceEphemeralStorage.String(),
+            resource: v1.ResourceEphemeralStorage,
+            expected: false,
+        },
+        {
+            name: v1.ResourceHugePagesPrefix + "2Mi",
+            resource: v1.ResourceHugePagesPrefix + "2Mi",
+            expected: true,
+        },
+        {
+            name: v1.ResourceHugePagesPrefix + "1Gi",
+            resource: v1.ResourceHugePagesPrefix + "1Gi",
+            expected: true,
+        },
+    }
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            if got := IsSupportedPodLevelResource(tc.resource); got != tc.expected {
+                t.Errorf("Supported pod level resource %s: got=%t, want=%t", tc.resource.String(), got, tc.expected)
+            }
+        })
+    }
+}

 func TestAggregateContainerRequestsAndLimits(t *testing.T) {
     restartAlways := v1.ContainerRestartPolicyAlways
     cases := []struct {
@@ -33,6 +33,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/uuid"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/cm"
     "k8s.io/kubernetes/test/e2e/feature"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -177,8 +178,8 @@ func isHugePageAvailable(hugepagesSize int) bool {
     return true
 }

-func getHugepagesTestPod(f *framework.Framework, limits v1.ResourceList, mounts []v1.VolumeMount, volumes []v1.Volume) *v1.Pod {
-    return &v1.Pod{
+func getHugepagesTestPod(f *framework.Framework, podLimits v1.ResourceList, containerLimits v1.ResourceList, mounts []v1.VolumeMount, volumes []v1.Volume) *v1.Pod {
+    pod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             GenerateName: "hugepages-",
             Namespace: f.Namespace.Name,
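Call sites now decide whether the limits land on the pod or on the container; for example (hypothetical quantities, with f, mounts, and volumes coming from the surrounding test):

    // Pod-level hugepages limit only; the container carries no limits.
    podOnly := getHugepagesTestPod(f,
        v1.ResourceList{v1.ResourceName("hugepages-2Mi"): resource.MustParse("6Mi")}, // podLimits
        nil, // containerLimits
        mounts, volumes)

    // Container-level limits only, matching the previous behavior.
    ctrOnly := getHugepagesTestPod(f,
        nil,
        v1.ResourceList{v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi")},
        mounts, volumes)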
@@ -186,18 +187,110 @@ func getHugepagesTestPod(f *framework.Framework, limits v1.ResourceList, mounts
     Spec: v1.PodSpec{
         Containers: []v1.Container{
             {
                 Name: "container" + string(uuid.NewUUID()),
                 Image: busyboxImage,
-                Resources: v1.ResourceRequirements{
-                    Limits: limits,
-                },
                 Command: []string{"sleep", "3600"},
                 VolumeMounts: mounts,
+                Resources: v1.ResourceRequirements{
+                    Limits: containerLimits,
+                },
             },
         },
         Volumes: volumes,
     },
 }
+
+    if podLimits != nil {
+        pod.Spec.Resources = &v1.ResourceRequirements{
+            Limits: podLimits,
+        }
+    }
+
+    return pod
+}
+
+func setHugepages(ctx context.Context, hugepages map[string]int) {
+    for hugepagesResource, count := range hugepages {
+        size := resourceToSize[hugepagesResource]
+        ginkgo.By(fmt.Sprintf("Verifying hugepages %d are supported", size))
+        if !isHugePageAvailable(size) {
+            e2eskipper.Skipf("skipping test because hugepages of size %d not supported", size)
+            return
+        }
+
+        ginkgo.By(fmt.Sprintf("Configuring the host to reserve %d of pre-allocated hugepages of size %d", count, size))
+        gomega.Eventually(ctx, func() error {
+            if err := configureHugePages(size, count, nil); err != nil {
+                return err
+            }
+            return nil
+        }, 30*time.Second, framework.Poll).Should(gomega.Succeed())
+    }
+}
+
+func waitForHugepages(f *framework.Framework, ctx context.Context, hugepages map[string]int) {
+    ginkgo.By("Waiting for hugepages resource to become available on the local node")
+    gomega.Eventually(ctx, func(ctx context.Context) error {
+        node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
+        if err != nil {
+            return err
+        }
+
+        for hugepagesResource, count := range hugepages {
+            capacity, ok := node.Status.Capacity[v1.ResourceName(hugepagesResource)]
+            if !ok {
+                return fmt.Errorf("the node does not have the resource %s", hugepagesResource)
+            }
+
+            size, succeed := capacity.AsInt64()
+            if !succeed {
+                return fmt.Errorf("failed to convert quantity to int64")
+            }
+
+            expectedSize := count * resourceToSize[hugepagesResource] * 1024
+            if size != int64(expectedSize) {
+                return fmt.Errorf("the actual size %d is different from the expected one %d", size, expectedSize)
+            }
+        }
+        return nil
+    }, time.Minute, framework.Poll).Should(gomega.Succeed())
+}
+
+func releaseHugepages(ctx context.Context, hugepages map[string]int) {
+    ginkgo.By("Releasing hugepages")
+    gomega.Eventually(ctx, func() error {
+        for hugepagesResource := range hugepages {
+            command := fmt.Sprintf("echo 0 > %s-%dkB/%s", hugepagesDirPrefix, resourceToSize[hugepagesResource], hugepagesCapacityFile)
+            if err := exec.Command("/bin/sh", "-c", command).Run(); err != nil {
+                return err
+            }
+        }
+        return nil
+    }, 30*time.Second, framework.Poll).Should(gomega.Succeed())
+}
+
+func runHugePagesTests(f *framework.Framework, ctx context.Context, testpod *v1.Pod, expectedHugepageLimits v1.ResourceList, mounts []v1.VolumeMount, hugepages map[string]int) {
+    ginkgo.By("getting mounts for the test pod")
+    command := []string{"mount"}
+
+    out := e2epod.ExecCommandInContainer(f, testpod.Name, testpod.Spec.Containers[0].Name, command...)
+
+    for _, mount := range mounts {
+        ginkgo.By(fmt.Sprintf("checking that the hugetlb mount %s exists under the container", mount.MountPath))
+        gomega.Expect(out).To(gomega.ContainSubstring(mount.MountPath))
+    }
+
+    for resourceName := range hugepages {
+        verifyPod := makePodToVerifyHugePages(
+            "pod"+string(testpod.UID),
+            expectedHugepageLimits[v1.ResourceName(resourceName)],
+            resourceToCgroup[resourceName],
+        )
+        ginkgo.By("checking if the expected hugetlb settings were applied")
+        e2epod.NewPodClient(f).Create(ctx, verifyPod)
+        err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, verifyPod.Name, f.Namespace.Name)
+        framework.ExpectNoError(err)
+    }
 }

 // Serial because the test updates kubelet configuration.
@ -255,108 +348,24 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
|
|||||||
|
|
||||||
 	ginkgo.When("start the pod", func() {
 		var (
 			testpod   *v1.Pod
-			limits    v1.ResourceList
+			expectedHugepageLimits v1.ResourceList
+			containerLimits        v1.ResourceList
 			mounts    []v1.VolumeMount
 			volumes   []v1.Volume
 			hugepages map[string]int
 		)

-		setHugepages := func(ctx context.Context) {
-			for hugepagesResource, count := range hugepages {
-				size := resourceToSize[hugepagesResource]
-				ginkgo.By(fmt.Sprintf("Verifying hugepages %d are supported", size))
-				if !isHugePageAvailable(size) {
-					e2eskipper.Skipf("skipping test because hugepages of size %d not supported", size)
-					return
-				}
-
-				ginkgo.By(fmt.Sprintf("Configuring the host to reserve %d of pre-allocated hugepages of size %d", count, size))
-				gomega.Eventually(ctx, func() error {
-					if err := configureHugePages(size, count, nil); err != nil {
-						return err
-					}
-					return nil
-				}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
-			}
-		}
-
-		waitForHugepages := func(ctx context.Context) {
-			ginkgo.By("Waiting for hugepages resource to become available on the local node")
-			gomega.Eventually(ctx, func(ctx context.Context) error {
-				node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
-				if err != nil {
-					return err
-				}
-
-				for hugepagesResource, count := range hugepages {
-					capacity, ok := node.Status.Capacity[v1.ResourceName(hugepagesResource)]
-					if !ok {
-						return fmt.Errorf("the node does not have the resource %s", hugepagesResource)
-					}
-
-					size, succeed := capacity.AsInt64()
-					if !succeed {
-						return fmt.Errorf("failed to convert quantity to int64")
-					}
-
-					expectedSize := count * resourceToSize[hugepagesResource] * 1024
-					if size != int64(expectedSize) {
-						return fmt.Errorf("the actual size %d is different from the expected one %d", size, expectedSize)
-					}
-				}
-				return nil
-			}, time.Minute, framework.Poll).Should(gomega.BeNil())
-		}
-
-		releaseHugepages := func(ctx context.Context) {
-			ginkgo.By("Releasing hugepages")
-			gomega.Eventually(ctx, func() error {
-				for hugepagesResource := range hugepages {
-					command := fmt.Sprintf("echo 0 > %s-%dkB/%s", hugepagesDirPrefix, resourceToSize[hugepagesResource], hugepagesCapacityFile)
-					if err := exec.Command("/bin/sh", "-c", command).Run(); err != nil {
-						return err
-					}
-				}
-				return nil
-			}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
-		}
-
-		runHugePagesTests := func() {
-			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
-				ginkgo.By("getting mounts for the test pod")
-				command := []string{"mount"}
-				out := e2epod.ExecCommandInContainer(f, testpod.Name, testpod.Spec.Containers[0].Name, command...)
-
-				for _, mount := range mounts {
-					ginkgo.By(fmt.Sprintf("checking that the hugetlb mount %s exists under the container", mount.MountPath))
-					gomega.Expect(out).To(gomega.ContainSubstring(mount.MountPath))
-				}
-
-				for resourceName := range hugepages {
-					verifyPod := makePodToVerifyHugePages(
-						"pod"+string(testpod.UID),
-						testpod.Spec.Containers[0].Resources.Limits[v1.ResourceName(resourceName)],
-						resourceToCgroup[resourceName],
-					)
-					ginkgo.By("checking if the expected hugetlb settings were applied")
-					e2epod.NewPodClient(f).Create(ctx, verifyPod)
-					err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, verifyPod.Name, f.Namespace.Name)
-					framework.ExpectNoError(err)
-				}
-			})
-		}
-
 		// setup
 		ginkgo.JustBeforeEach(func(ctx context.Context) {
-			setHugepages(ctx)
+			setHugepages(ctx, hugepages)

 			ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
 			restartKubelet(ctx, true)

-			waitForHugepages(ctx)
+			waitForHugepages(f, ctx, hugepages)

-			pod := getHugepagesTestPod(f, limits, mounts, volumes)
+			pod := getHugepagesTestPod(f, nil, containerLimits, mounts, volumes)

 			ginkgo.By("by running a test pod that requests hugepages")
 			testpod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
@@ -367,18 +376,21 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
|
ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
|
||||||
e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
|
e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
|
||||||
|
|
||||||
releaseHugepages(ctx)
|
releaseHugepages(ctx, hugepages)
|
||||||
|
|
||||||
ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
|
ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
|
||||||
restartKubelet(ctx, true)
|
restartKubelet(ctx, true)
|
||||||
|
|
||||||
waitForHugepages(ctx)
|
waitForHugepages(f, ctx, hugepages)
|
||||||
})
|
})
|
||||||
|
|
||||||
ginkgo.Context("with the resources requests that contain only one hugepages resource ", func() {
|
ginkgo.Context("with the resources requests that contain only one hugepages resource ", func() {
|
||||||
ginkgo.Context("with the backward compatible API", func() {
|
ginkgo.Context("with the backward compatible API", func() {
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
limits = v1.ResourceList{
|
expectedHugepageLimits = v1.ResourceList{
|
||||||
|
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
||||||
|
}
|
||||||
|
containerLimits = v1.ResourceList{
|
||||||
v1.ResourceCPU: resource.MustParse("10m"),
|
v1.ResourceCPU: resource.MustParse("10m"),
|
||||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||||
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
||||||
@ -402,12 +414,17 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
|
|||||||
 					hugepages = map[string]int{hugepagesResourceName2Mi: 5}
 				})
 				// run tests
-				runHugePagesTests()
+				ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
+					runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
+				})
 			})

ginkgo.Context("with the new API", func() {
|
ginkgo.Context("with the new API", func() {
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
limits = v1.ResourceList{
|
expectedHugepageLimits = v1.ResourceList{
|
||||||
|
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
||||||
|
}
|
||||||
|
containerLimits = v1.ResourceList{
|
||||||
v1.ResourceCPU: resource.MustParse("10m"),
|
v1.ResourceCPU: resource.MustParse("10m"),
|
||||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||||
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
hugepagesResourceName2Mi: resource.MustParse("6Mi"),
|
||||||
@ -431,7 +448,9 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
|
|||||||
 					hugepages = map[string]int{hugepagesResourceName2Mi: 5}
 				})

-				runHugePagesTests()
+				ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
+					runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
+				})
 			})
 		})

 		ginkgo.JustAfterEach(func() {
@@ -445,7 +464,11 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
 					hugepagesResourceName2Mi: 5,
 					hugepagesResourceName1Gi: 1,
 				}
-				limits = v1.ResourceList{
+				expectedHugepageLimits = v1.ResourceList{
+					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
+					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
+				}
+				containerLimits = v1.ResourceList{
 					v1.ResourceCPU:           resource.MustParse("10m"),
 					v1.ResourceMemory:        resource.MustParse("100Mi"),
 					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
@@ -481,7 +504,443 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
 				}
 			})

-			runHugePagesTests()
+			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
+				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
+			})
+
+			ginkgo.JustAfterEach(func() {
+				hugepages = map[string]int{
+					hugepagesResourceName2Mi: 0,
+					hugepagesResourceName1Gi: 0,
+				}
+			})
+		})
+	})
+})

// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("Pod Level HugePages Resources", framework.WithSerial(), feature.PodLevelResources, func() {
	f := framework.NewDefaultFramework("pod-level-hugepages-resources")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

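	// A single shared setup/teardown pair below drives every combination of
	// pod-level and container-level hugepages limits under test.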
	ginkgo.When("pod level resources", func() {
		var (
			testpod                *v1.Pod
			expectedHugepageLimits v1.ResourceList
			podLimits              v1.ResourceList
			containerLimits        v1.ResourceList
			mounts                 []v1.VolumeMount
			volumes                []v1.Volume
			hugepages              map[string]int
		)

		// setup
		ginkgo.JustBeforeEach(func(ctx context.Context) {
			e2eskipper.SkipUnlessFeatureGateEnabled(features.PodLevelResources)

			setHugepages(ctx, hugepages)

			ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
			restartKubelet(ctx, true)

			waitForHugepages(f, ctx, hugepages)

			pod := getHugepagesTestPod(f, podLimits, containerLimits, mounts, volumes)

			ginkgo.By("by running a test pod that requests hugepages")
			testpod = e2epod.NewPodClient(f).CreateSync(ctx, pod)

			framework.Logf("Test pod name: %s", testpod.Name)
		})

		// We use JustAfterEach because the framework tears down the client in the AfterEach method.
		ginkgo.JustAfterEach(func(ctx context.Context) {
			ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
			e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)

			releaseHugepages(ctx, hugepages)

			ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
			restartKubelet(ctx, true)

			waitForHugepages(f, ctx, hugepages)
		})

ginkgo.Context("pod hugepages, no container hugepages, single page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
}
|
||||||
|
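				// Only the pod level sets a hugepages limit, so the pod cgroup is
				// expected to carry that value (6Mi) directly.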
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
				}
				containerLimits = v1.ResourceList{}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
				}
			})
		})

ginkgo.Context("pod hugepages, container hugepages, single page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
}
|
||||||
|
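				// Both levels set a 2Mi limit; the pod-level value (6Mi) is expected
				// to take precedence over the container-level value (4Mi).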
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
				}
				containerLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
				}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
				}
			})
		})

ginkgo.Context("no pod hugepages, container hugepages, single page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
}
|
||||||
|
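				// No pod-level hugepages limit is set, so the expectation falls back
				// to the container-level limit (4Mi).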
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("10m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				}
				containerLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
				}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
				}
			})
		})

ginkgo.Context("pod hugepages, no container hugepages, multiple page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
hugepagesResourceName1Gi: 1,
|
||||||
|
}
|
||||||
|
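				// Pod-level limits cover both page sizes and are expected verbatim.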
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				containerLimits = v1.ResourceList{}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
					{
						Name:      "hugepages-1gi",
						MountPath: "/hugepages-1Gi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
					{
						Name: "hugepages-1gi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages1Gi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
					hugepagesResourceName1Gi: 0,
				}
			})
		})

ginkgo.Context("pod hugepages, container hugepages, multiple page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
hugepagesResourceName1Gi: 1,
|
||||||
|
}
|
||||||
|
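				// Pod-level limits are expected to win for both page sizes, even
				// though the container caps 2Mi pages at 4Mi.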
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				containerLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
					{
						Name:      "hugepages-1gi",
						MountPath: "/hugepages-1Gi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
					{
						Name: "hugepages-1gi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages1Gi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
					hugepagesResourceName1Gi: 0,
				}
			})
		})

ginkgo.Context("no pod hugepages, container hugepages, multiple page size", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
hugepagesResourceName1Gi: 1,
|
||||||
|
}
|
||||||
|
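				// Without pod-level hugepages limits, the expected values fall back
				// to the container limits for both page sizes.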
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("10m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				}
				containerLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("4Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
					{
						Name:      "hugepages-1gi",
						MountPath: "/hugepages-1Gi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
					{
						Name: "hugepages-1gi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages1Gi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
					hugepagesResourceName1Gi: 0,
				}
			})
		})

ginkgo.Context("pod hugepages, container hugepages, different page size between pod and container level", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
hugepages = map[string]int{
|
||||||
|
hugepagesResourceName2Mi: 5,
|
||||||
|
hugepagesResourceName1Gi: 1,
|
||||||
|
}
|
||||||
|
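				// The pod level sets only the 2Mi limit and the container only the
				// 1Gi limit; each level is expected to supply its own page size.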
				expectedHugepageLimits = v1.ResourceList{
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				podLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName2Mi: resource.MustParse("6Mi"),
				}
				containerLimits = v1.ResourceList{
					v1.ResourceCPU:           resource.MustParse("10m"),
					v1.ResourceMemory:        resource.MustParse("100Mi"),
					hugepagesResourceName1Gi: resource.MustParse("1Gi"),
				}
				mounts = []v1.VolumeMount{
					{
						Name:      "hugepages-2mi",
						MountPath: "/hugepages-2Mi",
					},
					{
						Name:      "hugepages-1gi",
						MountPath: "/hugepages-1Gi",
					},
				}
				volumes = []v1.Volume{
					{
						Name: "hugepages-2mi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages2Mi,
							},
						},
					},
					{
						Name: "hugepages-1gi",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: mediumHugepages1Gi,
							},
						},
					},
				}
			})

			ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) {
				runHugePagesTests(f, ctx, testpod, expectedHugepageLimits, mounts, hugepages)
			})

			ginkgo.JustAfterEach(func() {
				hugepages = map[string]int{
					hugepagesResourceName2Mi: 0,
					hugepagesResourceName1Gi: 0,
				}
			})
		})
	})
})