Merge pull request #39546 from dashpole/dynamic_config_eviction_hard

Automatic merge from submit-queue (batch tested with PRs 39695, 37054, 39627, 39546, 39615)

Use Dynamic Config in e2e_node inode eviction test

An alternative solution to #39249, similar to the solution proposed by @vishh in #36828.

@Random-Liu @mtaufen

commit a2da4f0cac
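
In short: instead of relying on whatever EvictionHard threshold the Kubelet was started with, the test now passes its own threshold into runEvictionTest, which applies it through Dynamic Kubelet Configuration and reverts it afterwards. A minimal sketch of the resulting call shape, using only names that appear in the diff below (pod specs and the pressure check are elided):

    // Sketch, not the full spec body; f, podTestSpecs, and hasInodePressure
    // are defined in the surrounding e2e_node test.
    evictionTestTimeout := 30 * time.Minute
    testCondition := "Disk Pressure due to Inodes"
    // Per the diff comment, a threshold chosen so inode pressure triggers
    // sooner, letting the test finish within the halved 30-minute timeout.
    evictionHardLimit := "nodefs.inodesFree<50%"

    // New signature: the EvictionHard value is a parameter, applied via
    // tempSetEvictionHard inside runEvictionTest's Context.
    runEvictionTest(f, testCondition, podTestSpecs, evictionHardLimit, evictionTestTimeout, hasInodePressure)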
@@ -109,10 +109,12 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
 			},
 		},
 	}
-	evictionTestTimeout := 60 * time.Minute
+	evictionTestTimeout := 30 * time.Minute
 	testCondition := "Disk Pressure due to Inodes"
+	// Set the EvictionHard threshold lower to decrease test time
+	evictionHardLimit := "nodefs.inodesFree<50%"
 
-	runEvictionTest(f, testCondition, podTestSpecs, evictionTestTimeout, hasInodePressure)
+	runEvictionTest(f, testCondition, podTestSpecs, evictionHardLimit, evictionTestTimeout, hasInodePressure)
 })
 
 // Struct used by runEvictionTest that specifies the pod, and when that pod should be evicted, relative to other pods
@@ -130,11 +132,12 @@ type podTestSpec struct {
 // It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.)
 // It ensures that all lower evictionPriority pods are eventually evicted.
 // runEvictionTest then cleans up the testing environment by deleting provided nodes, and ensures that testCondition no longer exists
-func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec,
+func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec, evictionHard string,
 	evictionTestTimeout time.Duration, hasPressureCondition func(*framework.Framework, string) (bool, error)) {
 
 	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
 
+		tempSetEvictionHard(f, evictionHard)
 		BeforeEach(func() {
 			By("seting up pods to be used by tests")
 			for _, spec := range podTestSpecs {
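Note the call placement in the hunk above: tempSetEvictionHard is invoked directly inside the Context block, while Ginkgo is still constructing the spec tree, not inside a BeforeEach. As the changes below show, the helper registers its own BeforeEach/AfterEach pair, so it must run at registration time and ahead of the test's own BeforeEach. The pattern, sketched with an illustrative Context body:

    Context("when we run containers that should cause some pressure condition", func() {
        // Registers a BeforeEach that applies the override and an
        // AfterEach that restores the original KubeletConfiguration.
        tempSetEvictionHard(f, evictionHard)
        BeforeEach(func() {
            // Per-test setup; runs after the config override above,
            // because Ginkgo runs BeforeEach nodes in registration order.
        })
    })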
@@ -38,6 +38,7 @@ import (
 	// utilconfig "k8s.io/kubernetes/pkg/util/config"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
@@ -85,6 +86,51 @@ func getCurrentKubeletConfig() (*componentconfig.KubeletConfiguration, error) {
 	return kubeCfg, nil
 }
 
+// Convenience method to set the evictionHard threshold during the current context.
+func tempSetEvictionHard(f *framework.Framework, evictionHard string) {
+	tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
+		initialConfig.EvictionHard = evictionHard
+	})
+}
+
+// Must be called within a Context. Allows the function to modify the KubeletConfiguration during the BeforeEach of the context.
+// The change is reverted in the AfterEach of the context.
+func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *componentconfig.KubeletConfiguration)) {
+	var oldCfg *componentconfig.KubeletConfiguration
+	BeforeEach(func() {
+		configEnabled, err := isKubeletConfigEnabled(f)
+		framework.ExpectNoError(err)
+		if configEnabled {
+			oldCfg, err = getCurrentKubeletConfig()
+			framework.ExpectNoError(err)
+			clone, err := api.Scheme.DeepCopy(oldCfg)
+			framework.ExpectNoError(err)
+			newCfg := clone.(*componentconfig.KubeletConfiguration)
+			updateFunction(newCfg)
+			framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+		} else {
+			framework.Logf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
+				"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
+				"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
+		}
+	})
+	AfterEach(func() {
+		if oldCfg != nil {
+			err := setKubeletConfiguration(f, oldCfg)
+			framework.ExpectNoError(err)
+		}
+	})
+}
+
+// Returns true if kubeletConfig is enabled, false otherwise or if we cannot determine if it is.
+func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
+	cfgz, err := getCurrentKubeletConfig()
+	if err != nil {
+		return false, fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err)
+	}
+	return strings.Contains(cfgz.FeatureGates, "DynamicKubeletConfig=true"), nil
+}
+
 // Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName
 func getCurrentKubeletConfigMap(f *framework.Framework) (*v1.ConfigMap, error) {
 	return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName), metav1.GetOptions{})
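tempSetCurrentKubeletConfig takes an arbitrary mutation function, so tempSetEvictionHard above is just a one-field convenience wrapper; other node e2e suites could wrap it the same way for different settings. A hypothetical example, not part of this PR, assuming the MaxPods field on componentconfig.KubeletConfiguration of this era:

    // Hypothetical helper, same shape as tempSetEvictionHard: scope a
    // MaxPods override to the current Ginkgo Context.
    func tempSetMaxPods(f *framework.Framework, maxPods int32) {
        tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
            initialConfig.MaxPods = maxPods
        })
    }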
@@ -99,11 +145,11 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *componentconfig.Ku
 	)
 
 	// Make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure
-	cfgz, err := getCurrentKubeletConfig()
+	configEnabled, err := isKubeletConfigEnabled(f)
 	if err != nil {
 		return fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err)
 	}
-	if !strings.Contains(cfgz.FeatureGates, "DynamicKubeletConfig=true") {
+	if !configEnabled {
 		return fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
 			"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
 			"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")