Fixup incorrect use of DefaultFeatureGate.Set in tests

Jordan Liggitt
2018-11-21 00:25:58 -05:00
parent d440ecdd3b
commit 4dca07ef7e
17 changed files with 157 additions and 296 deletions
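
The change applies one pattern throughout: instead of mutating the shared utilfeature.DefaultFeatureGate by hand and restoring it with ad-hoc defers (or, in several tests, never restoring it and ignoring the error from Set), each test now calls the SetFeatureGateDuringTest helper from k8s.io/apiserver/pkg/util/feature/testing. The helper applies the override immediately and returns a restore function, so a single defer line both sets the gate and guarantees cleanup. A minimal sketch of the helper's shape, inferred from its call sites in the hunks below rather than copied from the upstream source:

    package testing

    import (
        "fmt"
        "testing"

        "k8s.io/apiserver/pkg/util/feature"
    )

    // SetFeatureGateDuringTest overrides a feature gate and returns a func
    // that restores the original value. The override is applied immediately;
    // only the returned restore func is deferred, hence the trailing () at
    // every call site:
    //
    //     defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.Foo, true)()
    func SetFeatureGateDuringTest(t *testing.T, gate feature.FeatureGate, f feature.Feature, value bool) func() {
        original := gate.Enabled(f)
        if err := gate.Set(fmt.Sprintf("%s=%v", f, value)); err != nil {
            t.Errorf("error setting %s=%v: %v", f, value, err)
        }
        return func() {
            if err := gate.Set(fmt.Sprintf("%s=%v", f, original)); err != nil {
                t.Errorf("error restoring %s=%v: %v", f, original, err)
            }
        }
    }

Note the trailing () on every defer line in the diffs below: it invokes the helper now and defers only the restore func it returns.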

View File

@@ -43,6 +43,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",

View File

@@ -32,8 +32,9 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    genericfeatures "k8s.io/apiserver/pkg/features"
+    "k8s.io/apiserver/pkg/features"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/pager"
@@ -170,9 +171,7 @@ func Test202StatusCode(t *testing.T) {
 }
 
 func TestAPIListChunking(t *testing.T) {
-    if err := utilfeature.DefaultFeatureGate.Set(string(genericfeatures.APIListChunking) + "=true"); err != nil {
-        t.Fatal(err)
-    }
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)()
     s, clientSet, closeFn := setup(t)
     defer closeFn()

View File

@@ -33,6 +33,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -485,21 +486,12 @@ func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string
 func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
     for _, fg := range featureGates() {
-        func() {
-            enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-            defer func() {
-                if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
-                    t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
-                }
-            }()
-
-            for _, f := range []bool{true, false} {
-                if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
-                    t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
-                }
+        for _, f := range []bool{true, false} {
+            func() {
+                defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
                 t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
-            }
-        }()
+            }()
+        }
     }
 }
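
One detail of the forEachFeatureGate rewrite worth spelling out: defer fires when the enclosing function returns, not at the end of a loop iteration, so each gate value is exercised inside an immediately invoked closure to make the restore run before the next iteration. A standalone illustration of that scoping rule (the setValue helper is hypothetical, mirroring the shape of SetFeatureGateDuringTest; this is not code from the commit):

    package main

    import "fmt"

    // setValue applies an override and returns a func that undoes it.
    func setValue(state map[string]bool, key string, v bool) func() {
        old := state[key]
        state[key] = v
        return func() { state[key] = old }
    }

    func main() {
        state := map[string]bool{"gate": false}
        for _, v := range []bool{true, false} {
            func() {
                defer setValue(state, "gate", v)() // restore runs when this closure returns
                fmt.Println("during iteration:", state["gate"])
            }()
            fmt.Println("after iteration:", state["gate"]) // back to the original false
        }
    }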
@@ -704,23 +696,10 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
     })
 }
 
-func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
-    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
-        t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
-    }
-}
-
 // When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
 // Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore.
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-    enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-    // Rollback feature gate.
-    defer func() {
-        if enabled {
-            setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-        }
-    }()
-    setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
@@ -761,17 +740,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 // feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource
 // on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource.
 func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
-    enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-    defer func() {
-        if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-            features.ScheduleDaemonSetPods, enabled)); err != nil {
-            t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
-        }
-    }()
-
-    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
-        t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
-    }
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
@@ -1012,16 +981,7 @@ func TestTaintedNode(t *testing.T) {
 // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
 // to the Unschedulable nodes when TaintNodesByCondition are enabled.
 func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
-    enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
-    defer func() {
-        if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-            features.TaintNodesByCondition, enabledTaint)); err != nil {
-            t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
-        }
-    }()
-    if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
-        t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
-    }
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
     forEachFeatureGate(t, func(t *testing.T) {
         forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {

View File

@@ -54,6 +54,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/pkg/features"
     _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
@@ -64,7 +65,7 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
 // TestPreemption tests a few preemption scenarios.
 func TestPreemption(t *testing.T) {
     // Enable PodPriority feature gate.
-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
     // Initialize scheduler.
     context := initTest(t, "preemption")
     defer cleanupTest(t, context)
@@ -292,7 +293,7 @@ func TestPreemption(t *testing.T) {
 // TestDisablePreemption tests disable pod preemption of scheduler works as expected.
 func TestDisablePreemption(t *testing.T) {
     // Enable PodPriority feature gate.
-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
     // Initialize scheduler, and disable preemption.
     context := initTestDisablePreemption(t, "disable-preemption")
     defer cleanupTest(t, context)
@@ -394,7 +395,7 @@ func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace
 // after preemption and while the higher priority pods is not scheduled yet.
 func TestPreemptionStarvation(t *testing.T) {
     // Enable PodPriority feature gate.
-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
     // Initialize scheduler.
     context := initTest(t, "preemption")
     defer cleanupTest(t, context)
@@ -501,7 +502,7 @@ func TestPreemptionStarvation(t *testing.T) {
 // node name of the medium priority pod is cleared.
 func TestNominatedNodeCleanUp(t *testing.T) {
     // Enable PodPriority feature gate.
-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
     // Initialize scheduler.
     context := initTest(t, "preemption")
     defer cleanupTest(t, context)
@@ -615,7 +616,7 @@ func addPodConditionReady(pod *v1.Pod) {
 // TestPDBInPreemption tests PodDisruptionBudget support in preemption.
 func TestPDBInPreemption(t *testing.T) {
     // Enable PodPriority feature gate.
-    utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
     // Initialize scheduler.
     context := initTest(t, "preemption-pdb")
     defer cleanupTest(t, context)

View File

@@ -28,10 +28,12 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
     "k8s.io/kubernetes/pkg/controller/nodelifecycle"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
@@ -61,14 +63,8 @@ func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
 // TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
 func TestTaintNodeByCondition(t *testing.T) {
-    enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
-    defer func() {
-        if !enabled {
-            utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
-        }
-    }()
-
-    // Enable TaintNodeByCondition
-    utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 
     // Build PodToleration Admission.
     admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

View File

@@ -36,10 +36,12 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
     persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     "k8s.io/kubernetes/pkg/volume"
     volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -95,11 +97,9 @@ type testPVC struct {
 }
 
 func TestVolumeBinding(t *testing.T) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
-    config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+    config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
     defer config.teardown()
 
     cases := map[string]struct {
@@ -268,11 +268,9 @@ func TestVolumeBinding(t *testing.T) {
 // TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed.
 func TestVolumeBindingRescheduling(t *testing.T) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
-    config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+    config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
     defer config.teardown()
 
     storageClassName := "local-storage"
@@ -414,11 +412,9 @@ func TestVolumeBindingDynamicStressSlow(t *testing.T) {
 }
 
 func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
-    config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, true)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+    config := setupCluster(t, "volume-binding-stress-", 1, schedulerResyncPeriod, provisionDelaySeconds, true)
     defer config.teardown()
 
     // Set max volume limit to the number of PVCs the test will create
@@ -491,12 +487,10 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 }
 
 func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
     // TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed
-    config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true)
+    config := setupCluster(t, "volume-pod-affinity-", numNodes, 0, 0, true)
     defer config.teardown()
 
     pods := []*v1.Pod{}
@@ -621,11 +615,9 @@ func TestVolumeBindingWithAffinity(t *testing.T) {
 }
 
 func TestPVAffinityConflict(t *testing.T) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
-    config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, true)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+    config := setupCluster(t, "volume-scheduling-", 3, 0, 0, true)
     defer config.teardown()
 
     pv := makePV("local-pv", classImmediate, "", "", node1)
@@ -684,11 +676,9 @@ func TestPVAffinityConflict(t *testing.T) {
 }
 
 func TestVolumeProvision(t *testing.T) {
-    features := map[string]bool{
-        "VolumeScheduling":       true,
-        "PersistentLocalVolumes": true,
-    }
-    config := setupCluster(t, "volume-scheduling", 1, features, 0, 0, true)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+    config := setupCluster(t, "volume-scheduling", 1, 0, 0, true)
     defer config.teardown()
 
     cases := map[string]struct {
@@ -825,15 +815,8 @@ func TestVolumeProvision(t *testing.T) {
 // selectedNode annotation from a claim to reschedule volume provision
 // on provision failure.
 func TestRescheduleProvisioning(t *testing.T) {
-    features := map[string]bool{
-        "VolumeScheduling": true,
-    }
-    oldFeatures := make(map[string]bool, len(features))
-    for feature := range features {
-        oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-    }
-    // Set feature gates
-    utilfeature.DefaultFeatureGate.SetFromMap(features)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
 
     controllerCh := make(chan struct{})
 
     context := initTestMaster(t, "reschedule-volume-provision", nil)
@@ -846,8 +829,6 @@ func TestRescheduleProvisioning(t *testing.T) {
         deleteTestObjects(clientset, ns, nil)
         context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
         context.closeFn()
-        // Restore feature gates
-        utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
     }()
 
     ctrl, informerFactory, err := initPVController(context, 0)
@@ -893,14 +874,7 @@ func TestRescheduleProvisioning(t *testing.T) {
     }
 }
 
-func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
-    oldFeatures := make(map[string]bool, len(features))
-    for feature := range features {
-        oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-    }
-    // Set feature gates
-    utilfeature.DefaultFeatureGate.SetFromMap(features)
-
+func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
     context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), false, nil, false, disableEquivalenceCache, resyncPeriod)
     clientset := context.clientSet
@@ -938,8 +912,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
         teardown: func() {
             deleteTestObjects(clientset, ns, nil)
             cleanupTest(t, context)
-            // Restore feature gates
-            utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
         },
     }
 }