Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 18:31:15 +00:00)
feat: cleanup PodPriority feature gate

parent 0f2b01ab33
commit ca6003bc75
@@ -315,13 +315,6 @@ func dropDisabledFields(
 		}
 	}
-
-	if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) && !podPriorityInUse(oldPodSpec) {
-		// Set to nil pod's priority fields if the feature is disabled and the old pod
-		// does not specify any values for these fields.
-		podSpec.Priority = nil
-		podSpec.PriorityClassName = ""
-	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) && !sysctlsInUse(oldPodSpec) {
 		if podSpec.SecurityContext != nil {
 			podSpec.SecurityContext.Sysctls = nil
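The removed branch cleared the priority fields only when the old spec was not already using them, so an update could not silently wipe existing data. For illustration, a minimal, self-contained sketch of that in-use check, using a simplified stand-in for api.PodSpec rather than the real type:

```go
package main

import "fmt"

// PodSpec is a simplified stand-in for api.PodSpec, reduced to the two
// fields the removed gate check inspected.
type PodSpec struct {
	Priority          *int32
	PriorityClassName string
}

// podPriorityInUse sketches the helper referenced above: it reports
// whether the old spec already set either priority field, in which case
// dropDisabledFields left the fields alone even with the gate off.
func podPriorityInUse(oldPodSpec *PodSpec) bool {
	if oldPodSpec == nil {
		return false
	}
	return oldPodSpec.Priority != nil || oldPodSpec.PriorityClassName != ""
}

func main() {
	p := int32(1000)
	fmt.Println(podPriorityInUse(nil))                    // false: no old spec
	fmt.Println(podPriorityInUse(&PodSpec{}))             // false: fields unset
	fmt.Println(podPriorityInUse(&PodSpec{Priority: &p})) // true: priority set
}
```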
@@ -712,122 +712,6 @@ func TestDropProcMount(t *testing.T) {
 	}
 }
-
-func TestDropPodPriority(t *testing.T) {
-	podPriority := int32(1000)
-	podWithoutPriority := func() *api.Pod {
-		return &api.Pod{
-			Spec: api.PodSpec{
-				Priority:          nil,
-				PriorityClassName: "",
-			},
-		}
-	}
-	podWithPriority := func() *api.Pod {
-		return &api.Pod{
-			Spec: api.PodSpec{
-				Priority:          &podPriority,
-				PriorityClassName: "",
-			},
-		}
-	}
-	podWithPriorityClassOnly := func() *api.Pod {
-		return &api.Pod{
-			Spec: api.PodSpec{
-				Priority:          nil,
-				PriorityClassName: "HighPriorityClass",
-			},
-		}
-	}
-	podWithBothPriorityFields := func() *api.Pod {
-		return &api.Pod{
-			Spec: api.PodSpec{
-				Priority:          &podPriority,
-				PriorityClassName: "HighPriorityClass",
-			},
-		}
-	}
-	podInfo := []struct {
-		description    string
-		hasPodPriority bool
-		pod            func() *api.Pod
-	}{
-		{
-			description:    "pod With no PodPriority fields set",
-			hasPodPriority: false,
-			pod:            podWithoutPriority,
-		},
-		{
-			description:    "feature disabled and pod With PodPriority field set but class name not set",
-			hasPodPriority: true,
-			pod:            podWithPriority,
-		},
-		{
-			description:    "feature disabled and pod With PodPriority ClassName field set but PortPriority not set",
-			hasPodPriority: true,
-			pod:            podWithPriorityClassOnly,
-		},
-		{
-			description:    "feature disabled and pod With both PodPriority ClassName and PodPriority fields set",
-			hasPodPriority: true,
-			pod:            podWithBothPriorityFields,
-		},
-		{
-			description:    "is nil",
-			hasPodPriority: false,
-			pod:            func() *api.Pod { return nil },
-		},
-	}
-
-	for _, enabled := range []bool{true, false} {
-		for _, oldPodInfo := range podInfo {
-			for _, newPodInfo := range podInfo {
-				oldPodHasPodPriority, oldPod := oldPodInfo.hasPodPriority, oldPodInfo.pod()
-				newPodHasPodPriority, newPod := newPodInfo.hasPodPriority, newPodInfo.pod()
-				if newPod == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
-					defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, enabled)()
-
-					var oldPodSpec *api.PodSpec
-					if oldPod != nil {
-						oldPodSpec = &oldPod.Spec
-					}
-					dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
-
-					// old pod should never be changed
-					if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
-						t.Errorf("old pod changed: %v", diff.ObjectReflectDiff(oldPod, oldPodInfo.pod()))
-					}
-
-					switch {
-					case enabled || oldPodHasPodPriority:
-						// new pod should not be changed if the feature is enabled, or if the old pod had PodPriority
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodInfo.pod()))
-						}
-					case newPodHasPodPriority:
-						// new pod should be changed
-						if reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod was not changed")
-						}
-						// new pod should not have PodPriority
-						if !reflect.DeepEqual(newPod, podWithoutPriority()) {
-							t.Errorf("new pod had PodPriority: %v", diff.ObjectReflectDiff(newPod, podWithoutPriority()))
-						}
-					default:
-						// new pod should not need to be changed
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodInfo.pod()))
-						}
-					}
-				})
-			}
-		}
-	}
-}
 
 func TestDropEmptyDirSizeLimit(t *testing.T) {
 	sizeLimit := resource.MustParse("1Gi")
 	podWithEmptyDirSizeLimit := func() *api.Pod {
@@ -6332,7 +6332,6 @@ func TestValidatePodSpec(t *testing.T) {
 	minGroupID := int64(0)
 	maxGroupID := int64(2147483647)
 
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClass, true)()
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
 
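The deleted line above follows a pattern repeated throughout this commit: featuregatetesting.SetFeatureGateDuringTest overrides a gate and returns a cleanup function, so deferring the call scopes the override to a single test. A sketch of the idiom (test name illustrative, gate choice arbitrary):

```go
package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestWithGate shows the override-and-restore idiom being deleted for
// PodPriority: the helper flips the gate, and the returned func puts
// the previous value back when the test finishes.
func TestWithGate(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClass, true)()
	// ... test body runs with RuntimeClass enabled ...
}
```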
@@ -509,7 +509,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	Sysctls: {Default: true, PreRelease: featuregate.Beta},
 	DebugContainers: {Default: false, PreRelease: featuregate.Alpha},
 	PodShareProcessNamespace: {Default: true, PreRelease: featuregate.Beta},
-	PodPriority: {Default: true, PreRelease: featuregate.GA},
+	PodPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18
 	TaintNodesByCondition: {Default: true, PreRelease: featuregate.Beta},
 	QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
 	ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
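LockToDefault pins a graduated gate to its default value: an attempt to override it at startup fails instead of being silently honored, which is what makes removing the call-site checks in the rest of this commit safe. A minimal sketch of that behavior using k8s.io/component-base/featuregate, with the registration local to the example:

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()
	// Register a GA feature locked to its default, mirroring the new
	// PodPriority entry above.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"PodPriority": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}
	// Enabled reflects the locked default.
	fmt.Println(gate.Enabled("PodPriority")) // true
	// Turning a locked gate off returns an error rather than taking effect.
	fmt.Println(gate.Set("PodPriority=false")) // non-nil error
}
```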
@@ -138,12 +138,9 @@ func DefaultOffAdmissionPlugins() sets.String {
 		validatingwebhook.PluginName, //ValidatingAdmissionWebhook
 		resourcequota.PluginName, //ResourceQuota
 		storageobjectinuseprotection.PluginName, //StorageObjectInUseProtection
+		podpriority.PluginName, //PodPriority
 	)
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
-		defaultOnPlugins.Insert(podpriority.PluginName) //PodPriority
-	}
-
 	if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) {
 		defaultOnPlugins.Insert(nodetaint.PluginName) //TaintNodesByCondition
 	}
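With the gate locked on, the priority admission plugin no longer needs a conditional Insert; it joins the unconditional default-on set, which DefaultOffAdmissionPlugins then subtracts from the full plugin list. A rough sketch of that shape using k8s.io/apimachinery's string set (plugin names here are illustrative stand-ins, not the authoritative registry):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// defaultOnPlugins sketches how the default-on set is now built in one
// shot, with no feature-gate branch for the priority plugin.
func defaultOnPlugins() sets.String {
	return sets.NewString(
		"ValidatingAdmissionWebhook",
		"ResourceQuota",
		"StorageObjectInUseProtection",
		"Priority", // formerly inserted only when features.PodPriority was enabled
	)
}

func main() {
	fmt.Println(defaultOnPlugins().List())
}
```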
@@ -182,7 +182,6 @@ type podToMake struct {
 
 // TestMemoryPressure
 func TestMemoryPressure(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -401,7 +400,6 @@ func parseQuantity(value string) resource.Quantity {
 
 func TestDiskPressureNodeFs(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 
 	podMaker := makePodWithDiskStats
 	summaryStatsMaker := makeDiskStats
@@ -600,7 +598,6 @@ func TestDiskPressureNodeFs(t *testing.T) {
 
 // TestMinReclaim verifies that min-reclaim works as desired.
 func TestMinReclaim(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -740,7 +737,6 @@ func TestMinReclaim(t *testing.T) {
 
 func TestNodeReclaimFuncs(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 
 	podMaker := makePodWithDiskStats
 	summaryStatsMaker := makeDiskStats
@@ -917,7 +913,6 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }
 
 func TestInodePressureNodeFsInodes(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := func(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) {
 		pod := newPod(name, priority, []v1.Container{
 			newContainer(name, requests, limits),
@@ -1139,7 +1134,6 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 
 // TestCriticalPodsAreNotEvicted
 func TestCriticalPodsAreNotEvicted(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -1280,7 +1274,6 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 
 // TestAllocatableMemoryPressure
 func TestAllocatableMemoryPressure(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -513,10 +513,6 @@ func (ms *multiSorter) Less(i, j int) bool {
 
 // priority compares pods by Priority, if priority is enabled.
 func priority(p1, p2 *v1.Pod) int {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
-		// If priority is not enabled, all pods are equal.
-		return 0
-	}
 	priority1 := schedulerutils.GetPodPriority(p1)
 	priority2 := schedulerutils.GetPodPriority(p2)
 	if priority1 == priority2 {
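After this change, the eviction ranker always consults pod priority instead of treating all pods as equal when the gate was off. For illustration, a self-contained sketch of the three-way comparator pattern the multiSorter composes, over a simplified pod type:

```go
package main

import (
	"fmt"
	"sort"
)

// pod is a simplified stand-in for *v1.Pod carrying only a priority.
type pod struct {
	name     string
	priority int32
}

// byPriority returns -1, 0, or 1 so it can be chained with other
// comparators; lower-priority pods sort first, i.e. are evicted first.
func byPriority(p1, p2 pod) int {
	switch {
	case p1.priority < p2.priority:
		return -1
	case p1.priority > p2.priority:
		return 1
	default:
		return 0
	}
}

func main() {
	pods := []pod{{"high", 1000}, {"low", -1}, {"mid", 0}}
	sort.Slice(pods, func(i, j int) bool { return byPriority(pods[i], pods[j]) < 0 })
	fmt.Println(pods) // [{low -1} {mid 0} {high 1000}]
}
```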
@@ -483,8 +483,6 @@ func thresholdEqual(a evictionapi.Threshold, b evictionapi.Threshold) bool {
 }
 
 func TestOrderedByExceedsRequestMemory(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
-
 	below := newPod("below-requests", -1, []v1.Container{
 		newContainer("below-requests", newResourceList("", "200Mi", ""), newResourceList("", "", "")),
 	}, nil)
@@ -511,7 +509,6 @@ func TestOrderedByExceedsRequestMemory(t *testing.T) {
 }
 
 func TestOrderedByExceedsRequestDisk(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	below := newPod("below-requests", -1, []v1.Container{
 		newContainer("below-requests", v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("200Mi")}, newResourceList("", "", "")),
@@ -539,7 +536,6 @@ func TestOrderedByExceedsRequestDisk(t *testing.T) {
 }
 
 func TestOrderedByPriority(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	low := newPod("low-priority", -134, []v1.Container{
 		newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, nil)
@@ -561,30 +557,6 @@ func TestOrderedByPriority(t *testing.T) {
 	}
 }
 
-func TestOrderedByPriorityDisabled(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, false)()
-	low := newPod("low-priority", lowPriority, []v1.Container{
-		newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
-	}, nil)
-	medium := newPod("medium-priority", defaultPriority, []v1.Container{
-		newContainer("medium-priority", newResourceList("100m", "100Mi", ""), newResourceList("200m", "200Mi", "")),
-	}, nil)
-	high := newPod("high-priority", highPriority, []v1.Container{
-		newContainer("high-priority", newResourceList("200m", "200Mi", ""), newResourceList("200m", "200Mi", "")),
-	}, nil)
-
-	pods := []*v1.Pod{high, medium, low}
-	orderedBy(priority).Sort(pods)
-
-	// orderedBy(priority) should not change the input ordering, since we did not enable the PodPriority feature gate
-	expected := []*v1.Pod{high, medium, low}
-	for i := range expected {
-		if pods[i] != expected[i] {
-			t.Errorf("Expected pod: %s, but got: %s", expected[i].Name, pods[i].Name)
-		}
-	}
-}
-
 func TestOrderedbyDisk(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
@@ -719,7 +691,6 @@ func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
 }
 
 func TestOrderedbyInodes(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	low := newPod("low", defaultPriority, []v1.Container{
 		newContainer("low", newResourceList("", "", ""), newResourceList("", "", "")),
@@ -763,7 +734,6 @@ func TestOrderedbyInodes(t *testing.T) {
 
 // TestOrderedByPriorityDisk ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityDisk(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
@@ -848,7 +818,6 @@ func TestOrderedByPriorityDisk(t *testing.T) {
 
 // TestOrderedByPriorityInodes ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityInodes(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	pod1 := newPod("low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -941,7 +910,6 @@ func TestOrderedByMemory(t *testing.T) {
 
 // TestOrderedByPriorityMemory ensures we order by priority and then memory consumption relative to request.
 func TestOrderedByPriorityMemory(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, nil)
@@ -146,10 +146,8 @@ func (sp SyncPodType) String() string {
 // or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
 // to make admission and scheduling decisions.
 func IsCriticalPod(pod *v1.Pod) bool {
-	if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
-		if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
-			return true
-		}
+	if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
+		return true
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
 		if IsCritical(pod.Namespace, pod.Annotations) {
@@ -165,11 +163,9 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
 	if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
 		return true
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
-		if (preemptor != nil && preemptor.Spec.Priority != nil) &&
-			(preemptee != nil && preemptee.Spec.Priority != nil) {
-			return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
-		}
+	if (preemptor != nil && preemptor.Spec.Priority != nil) &&
+		(preemptee != nil && preemptee.Spec.Priority != nil) {
+		return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
 	}
 
 	return false
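After the cleanup, preemptability reduces to the critical-pod short-circuit plus a plain numeric comparison, with missing priorities meaning no preemption. A self-contained sketch of that logic over simplified stand-ins for v1.Pod:

```go
package main

import "fmt"

// pod is a simplified stand-in for *v1.Pod: a nilable priority plus a
// precomputed critical flag standing in for IsCriticalPod.
type pod struct {
	priority *int32
	critical bool
}

// preemptable mirrors the post-cleanup Preemptable: a critical pod may
// preempt a non-critical one; otherwise the higher priority wins, and
// a missing priority on either side means no preemption.
func preemptable(preemptor, preemptee *pod) bool {
	if preemptor != nil && preemptee != nil && preemptor.critical && !preemptee.critical {
		return true
	}
	if preemptor != nil && preemptor.priority != nil &&
		preemptee != nil && preemptee.priority != nil {
		return *preemptor.priority > *preemptee.priority
	}
	return false
}

func main() {
	high, low := int32(2000), int32(100)
	fmt.Println(preemptable(&pod{priority: &high}, &pod{priority: &low})) // true
	fmt.Println(preemptable(&pod{priority: &low}, &pod{priority: &high})) // false
	fmt.Println(preemptable(&pod{critical: true}, &pod{}))                // true
}
```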
@@ -21,7 +21,6 @@ go_library(
 		"//pkg/scheduler/internal/cache:go_default_library",
 		"//pkg/scheduler/internal/queue:go_default_library",
 		"//pkg/scheduler/metrics:go_default_library",
-		"//pkg/scheduler/util:go_default_library",
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
 		"//staging/src/k8s.io/api/storage/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -42,7 +42,6 @@ import (
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
-	"k8s.io/kubernetes/pkg/scheduler/util"
 )
 
 const (
@@ -465,7 +464,7 @@ func (sched *Scheduler) scheduleOne() {
 		// will fit due to the preemption. It is also possible that a different pod will schedule
 		// into the resources that were preempted, but this is harmless.
 		if fitError, ok := err.(*core.FitError); ok {
-			if !util.PodPriorityEnabled() || sched.config.DisablePreemption {
+			if sched.config.DisablePreemption {
 				klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
 					" No preemption is performed.")
 			} else {
@@ -30,12 +30,10 @@ go_library(
 	importpath = "k8s.io/kubernetes/pkg/scheduler/util",
 	deps = [
 		"//pkg/apis/scheduling:go_default_library",
-		"//pkg/features:go_default_library",
 		"//pkg/scheduler/api:go_default_library",
 		"//pkg/scheduler/metrics:go_default_library",
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-		"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
 		"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
 		"//vendor/k8s.io/klog:go_default_library",
 	],
@@ -19,14 +19,13 @@ package util
 
 import (
 	"sort"
+	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/api"
-	"time"
 )
 
 // GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair
@@ -44,11 +43,6 @@ func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
 	return ports
 }
 
-// PodPriorityEnabled indicates whether pod priority feature is enabled.
-func PodPriorityEnabled() bool {
-	return feature.DefaultFeatureGate.Enabled(features.PodPriority)
-}
-
 // GetPodFullName returns a name that uniquely identifies a pod.
 func GetPodFullName(pod *v1.Pod) string {
 	// Use underscore as the delimiter because it is not allowed in pod name
@@ -100,10 +100,6 @@ var (
 // Admit checks Pods and admits or rejects them. It also resolves the priority of pods based on their PriorityClass.
 // Note that pod validation mechanism prevents update of a pod priority.
 func (p *priorityPlugin) Admit(a admission.Attributes, o admission.ObjectInterfaces) error {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
-		return nil
-	}
-
 	operation := a.GetOperation()
 	// Ignore all calls to subresources
 	if len(a.GetSubresource()) != 0 {
@@ -551,8 +551,6 @@ func TestPodAdmission(t *testing.T) {
 			},
 		},
 	}
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Enable ExperimentalCriticalPodAnnotation feature gate.
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	// Enable NonPreemptingPriority feature gate.
@@ -23,18 +23,15 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/features"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	testutils "k8s.io/kubernetes/test/utils"
 
@@ -65,8 +62,6 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
 
 // TestPreemption tests a few preemption scenarios.
 func TestPreemption(t *testing.T) {
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -293,8 +288,6 @@ func TestPreemption(t *testing.T) {
 
 // TestDisablePreemption tests disable pod preemption of scheduler works as expected.
 func TestDisablePreemption(t *testing.T) {
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler, and disable preemption.
 	context := initTestDisablePreemption(t, "disable-preemption")
 	defer cleanupTest(t, context)
@@ -395,8 +388,6 @@ func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace
 // terminate, other pending lower priority pods are not scheduled in the room created
 // after preemption and while the higher priority pods is not scheduled yet.
 func TestPreemptionStarvation(t *testing.T) {
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -615,8 +606,6 @@ func TestPreemptionRaces(t *testing.T) {
 // 5. Check that nominated node name of the high priority pod is set and nominated
 // node name of the medium priority pod is cleared.
 func TestNominatedNodeCleanUp(t *testing.T) {
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -729,8 +718,6 @@ func addPodConditionReady(pod *v1.Pod) {
 
 // TestPDBInPreemption tests PodDisruptionBudget support in preemption.
 func TestPDBInPreemption(t *testing.T) {
-	// Enable PodPriority feature gate.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption-pdb")
 	defer cleanupTest(t, context)
@@ -156,8 +156,7 @@ func initTestScheduler(
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
 ) *testContext {
-	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
-	// feature gate is enabled at the same time.
+	// Pod preemption is enabled by default scheduler configuration.
 	return initTestSchedulerWithOptions(t, context, setPodInformer, policy, schedulerframework.NewRegistry(),
 		nil, []schedulerconfig.PluginConfig{}, false, time.Second)
 }