Merge pull request #69437 from ravisantoshgudimetla/promote-resource-limits-priority-function
Promote resource limits priority function to beta
Commit: 897b3a9fa7
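With the gate promoted to Beta it is enabled by default. For orientation, here is a minimal sketch of how a component consults such a gate through the utilfeature API referenced in this diff; the import paths and the surrounding program are assumptions for illustration, not part of this change:

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"

	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// After this PR, ResourceLimitsPriorityFunction defaults to true (Beta),
	// so this branch is taken unless the gate is explicitly disabled, e.g. via
	// --feature-gates=ResourceLimitsPriorityFunction=false.
	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
		fmt.Println("resource limits priority is active")
	}
}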
@@ -420,7 +420,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	CustomPodDNS:                   {Default: true, PreRelease: utilfeature.Beta},
 	BlockVolume:                    {Default: false, PreRelease: utilfeature.Alpha},
 	StorageObjectInUseProtection:   {Default: true, PreRelease: utilfeature.GA},
-	ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
+	ResourceLimitsPriorityFunction: {Default: true, PreRelease: utilfeature.Beta},
 	SupportIPVSProxyMode:           {Default: true, PreRelease: utilfeature.GA},
 	SupportPodPidsLimit:            {Default: false, PreRelease: utilfeature.Alpha},
 	HyperVContainer:                {Default: false, PreRelease: utilfeature.Alpha},
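This gate controls the scheduler's resource limits priority, which favors nodes whose allocatable resources can satisfy a pod's declared limits. A rough, self-contained illustration of that scoring idea follows; it is not the scheduler's actual implementation (which lives in pkg/scheduler/algorithm/priorities and handles more cases), and the type and function names here are made up for the sketch:

package main

import "fmt"

// resources is a tiny stand-in for the scheduler's internal resource struct.
type resources struct {
	milliCPU int64
	memory   int64 // bytes
}

// resourceLimitsScore returns 1 when the node's allocatable resources cover
// the pod's declared limits, 0 otherwise.
func resourceLimitsScore(podLimits, nodeAllocatable resources) int {
	if podLimits.milliCPU <= nodeAllocatable.milliCPU &&
		podLimits.memory <= nodeAllocatable.memory {
		return 1
	}
	return 0
}

func main() {
	podLimits := resources{milliCPU: 100, memory: 3000 * 1024 * 1024} // 100m CPU, 3000Mi
	smallNode := resources{milliCPU: 4000, memory: 2000 * 1024 * 1024}
	largeNode := resources{milliCPU: 4000, memory: 5000 * 1024 * 1024}
	fmt.Println(resourceLimitsScore(podLimits, smallNode)) // 0
	fmt.Println(resourceLimitsScore(podLimits, largeNode)) // 1
}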
@@ -33,8 +33,10 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -29,6 +29,8 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -254,6 +256,49 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
 	})
+	It("Pod should be preferably scheduled to nodes which satisfy its limits", func() {
+		var podwithLargeRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("3000Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+			},
+		}
+		// Update one node to have large allocatable.
+		lastNode := nodeList.Items[len(nodeList.Items)-1]
+		nodeName := lastNode.Name
+		nodeOriginalMemory, found := lastNode.Status.Allocatable[v1.ResourceMemory]
+		Expect(found).To(Equal(true))
+		nodeOriginalMemoryVal := nodeOriginalMemory.Value()
+		err := updateMemoryOfNode(cs, nodeName, int64(10000))
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			// Resize the node back to its original memory.
+			if err := updateMemoryOfNode(cs, nodeName, nodeOriginalMemoryVal); err != nil {
+				framework.Logf("Failed to revert node memory with %v", err)
+			}
+		}()
+		err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
+		framework.ExpectNoError(err)
+		// After the above we should see 50% of node to be available which is 5000MiB for large node.
+		By("Create a pod with unusual large limits")
+		podWithLargeLimits := "with-large-limits"
+
+		pod := createPausePod(f, pausePodConfig{
+			Name:      podWithLargeLimits,
+			Resources: podwithLargeRequestedResource,
+		})
+		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+
+		By("Pod should preferably scheduled to nodes which satisfy its limits")
+		// The pod should land onto large node(which has 5000MiB free) which satisfies the pod limits which is 3000MiB.
+		podHighLimits, err := cs.CoreV1().Pods(ns).Get(podWithLargeLimits, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		Expect(podHighLimits.Spec.NodeName).To(Equal(nodeName))
+	})
 })

 // createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.
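As a quick sanity check on the numbers this test relies on, reading them the way the test's own comments do (a sketch, not part of the change):

package main

import "fmt"

func main() {
	// The last node's allocatable memory is raised to 10000 (the test's
	// comments treat this as MiB), then createBalancedPodForNodes fills
	// every node to roughly the 0.5 ratio passed to it.
	allocatable := 10000.0
	balancedRatio := 0.5
	free := allocatable * (1 - balancedRatio)
	// ~5000Mi free on the resized node versus a 3000Mi memory limit on the
	// test pod, so only that node comfortably satisfies the limits.
	fmt.Printf("free: %.0fMi, pod memory limit: 3000Mi\n", free)
}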
@@ -399,3 +444,24 @@ func addRandomTaitToNode(cs clientset.Interface, nodeName string) *v1.Taint {
 	framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
 	return &testTaint
 }
+
+// updateMemoryOfNode updates the memory of given node with the given value
+func updateMemoryOfNode(c clientset.Interface, nodeName string, memory int64) error {
+	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+	oldData, err := json.Marshal(node)
+	if err != nil {
+		return err
+	}
+	node.Status.Allocatable[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
+	newData, err := json.Marshal(node)
+	if err != nil {
+		return err
+	}
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
+	if err != nil {
+		return err
+	}
+	_, err = c.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
+	return err
+}