Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00

commit b6211657bf (parent a5ed6c824a)

    e2e_node: drop usage of DynamicKubeletConfig

    Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
@@ -34,13 +34,14 @@ import (
 	cpumanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/types"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-
-	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 )
 
 // Helper for makeCPUManagerPod().
@@ -60,12 +61,12 @@ func makeCPUManagerPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod {
 			Image: busyboxImage,
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuRequest),
-					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse(ctnAttr.cpuRequest),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuLimit),
-					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse(ctnAttr.cpuLimit),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			},
 			Command: []string{"sh", "-c", cpusetCmd},
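The dropped conversions above were no-ops: the CPU and memory constants in
k8s.io/api/core/v1 are already typed as ResourceName, so wrapping them in
v1.ResourceName(...) converted a value to its own type. For reference, the
upstream definitions:

    const (
        // ResourceCPU and ResourceMemory are predeclared as ResourceName
        // constants, which is why the explicit conversion could be dropped.
        ResourceCPU    ResourceName = "cpu"
        ResourceMemory ResourceName = "memory"
    )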
@@ -109,7 +110,7 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
 	// RoundUp reserved CPUs to get only integer cores.
 	cpuRes.RoundUp(0)
 
-	return cpuCap.Value(), (cpuCap.Value() - cpuRes.Value()), cpuRes.Value()
+	return cpuCap.Value(), cpuCap.Value() - cpuRes.Value(), cpuRes.Value()
 }
 
 func waitForContainerRemoval(containerName, podName, podNS string) {
@@ -190,71 +191,75 @@ func deleteStateFile() {
 	framework.ExpectNoError(err, "error deleting state file")
 }
 
-func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletConfiguration) {
+func setOldKubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration) {
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
 	// Delete the CPU Manager state file so that the old Kubelet configuration
-	// can take effect.i
+	// can take effect
 	deleteStateFile()
 
 	if oldCfg != nil {
-		framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
+		framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg))
 	}
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+}
+
+type cpuManagerKubeletArguments struct {
+	policyName              string
+	cleanStateFile          bool
+	enableCPUManager        bool
+	enableCPUManagerOptions bool
+	reservedSystemCPUs      cpuset.CPUSet
+	options                 map[string]string
 }
 
 func disableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
 	// Disable CPU Manager in Kubelet.
-	oldCfg, err := getCurrentKubeletConfig()
-	framework.ExpectNoError(err)
-	newCfg := oldCfg.DeepCopy()
-	if newCfg.FeatureGates == nil {
-		newCfg.FeatureGates = make(map[string]bool)
-	}
-	newCfg.FeatureGates["CPUManager"] = false
-
-	// Update the Kubelet configuration.
-	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
-
-	// Wait for the Kubelet to be ready.
-	gomega.Eventually(func() bool {
-		nodes, err := e2enode.TotalReady(f.ClientSet)
-		framework.ExpectNoError(err)
-		return nodes == 1
-	}, time.Minute, time.Second).Should(gomega.BeTrue())
-
+	oldCfg = configureCPUManagerInKubelet(f, &cpuManagerKubeletArguments{
+		enableCPUManager: false,
+	})
 	return oldCfg
 }
 
-func configureCPUManagerInKubelet(f *framework.Framework, policyName string, cleanStateFile bool, reservedSystemCPUs cpuset.CPUSet, enableOptions bool, options map[string]string) (oldCfg *kubeletconfig.KubeletConfiguration) {
+func configureCPUManagerInKubelet(f *framework.Framework, kubeletArguments *cpuManagerKubeletArguments) (oldCfg *kubeletconfig.KubeletConfiguration) {
 	// Enable CPU Manager in Kubelet with static policy.
 	oldCfg, err := getCurrentKubeletConfig()
 	framework.ExpectNoError(err)
 
 	newCfg := oldCfg.DeepCopy()
 	if newCfg.FeatureGates == nil {
 		newCfg.FeatureGates = make(map[string]bool)
 	}
-	newCfg.FeatureGates["CPUManager"] = true
-	newCfg.FeatureGates["CPUManagerPolicyOptions"] = enableOptions
-	newCfg.FeatureGates["CPUManagerPolicyBetaOptions"] = enableOptions
-	newCfg.FeatureGates["CPUManagerPolicyAlphaOptions"] = enableOptions
-
-	// After graduation of the CPU Manager feature to Beta, the CPU Manager
-	// "none" policy is ON by default. But when we set the CPU Manager policy to
-	// "static" in this test and the Kubelet is restarted so that "static"
-	// policy can take effect, there will always be a conflict with the state
-	// checkpointed in the disk (i.e., the policy checkpointed in the disk will
-	// be "none" whereas we are trying to restart Kubelet with "static"
-	// policy). Therefore, we delete the state file so that we can proceed
-	// with the tests.
-	// Only delete the state file at the begin of the tests.
-	if cleanStateFile {
-		deleteStateFile()
-	}
-
-	newCfg.CPUManagerPolicy = policyName
-	newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}
-	newCfg.CPUManagerPolicyOptions = options
-
-	if reservedSystemCPUs.Size() > 0 {
-		cpus := reservedSystemCPUs.String()
+	newCfg.FeatureGates["CPUManager"] = kubeletArguments.enableCPUManager
+	newCfg.FeatureGates["CPUManagerPolicyOptions"] = kubeletArguments.enableCPUManagerOptions
+	newCfg.FeatureGates["CPUManagerPolicyBetaOptions"] = kubeletArguments.enableCPUManagerOptions
+	newCfg.FeatureGates["CPUManagerPolicyAlphaOptions"] = kubeletArguments.enableCPUManagerOptions
+
+	newCfg.CPUManagerPolicy = kubeletArguments.policyName
+	newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}
+
+	if kubeletArguments.options != nil {
+		newCfg.CPUManagerPolicyOptions = kubeletArguments.options
+	}
+
+	if kubeletArguments.reservedSystemCPUs.Size() > 0 {
+		cpus := kubeletArguments.reservedSystemCPUs.String()
 		framework.Logf("configureCPUManagerInKubelet: using reservedSystemCPUs=%q", cpus)
 		newCfg.ReservedSystemCPUs = cpus
 	} else {
@@ -269,8 +274,37 @@ func configureCPUManagerInKubelet(f *framework.Framework, policyName string, cle
 			newCfg.KubeReserved["cpu"] = "200m"
 		}
 	}
-	// Update the Kubelet configuration.
-	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	// After graduation of the CPU Manager feature to Beta, the CPU Manager
+	// "none" policy is ON by default. But when we set the CPU Manager policy to
+	// "static" in this test and the Kubelet is restarted so that "static"
+	// policy can take effect, there will always be a conflict with the state
+	// checkpointed in the disk (i.e., the policy checkpointed in the disk will
+	// be "none" whereas we are trying to restart Kubelet with "static"
+	// policy). Therefore, we delete the state file so that we can proceed
+	// with the tests.
+	// Only delete the state file at the begin of the tests.
+	if kubeletArguments.cleanStateFile {
+		deleteStateFile()
+	}
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(func() bool {
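The hunks above establish the pattern this commit uses everywhere in place of
DynamicKubeletConfig: stop the kubelet, wait for its health endpoint to go
down, write the new KubeletConfiguration directly to the config file, restart
the service, and wait for the endpoint to come back. A minimal sketch of that
flow, assuming the suite helpers stopKubelet, kubeletHealthCheck, and
kubeletHealthCheckURL behave the way this diff uses them:

    package e2enode

    import (
        "time"

        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"

        kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
        e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
    )

    // Assumed suite helpers; signatures inferred from this diff, not verified.
    var (
        stopKubelet           func() func()     // stops the service, returns a start closure
        kubeletHealthCheck    func(string) bool // true while the health endpoint answers
        kubeletHealthCheckURL string
    )

    // applyKubeletConfig sketches the restart-based reconfiguration flow.
    func applyKubeletConfig(cfg *kubeletconfig.KubeletConfiguration) {
        ginkgo.By("Stopping the kubelet")
        startKubelet := stopKubelet()

        // The health endpoint must go down before the config file is swapped.
        gomega.Eventually(func() bool {
            return kubeletHealthCheck(kubeletHealthCheckURL)
        }, time.Minute, time.Second).Should(gomega.BeFalse())

        // Write the new configuration where the kubelet's config file lives.
        if err := e2enodekubelet.WriteKubeletConfigFile(cfg); err != nil {
            ginkgo.Fail(err.Error())
        }

        ginkgo.By("Starting the kubelet")
        startKubelet()

        // The kubelet re-reads the file on startup; wait until it is healthy.
        gomega.Eventually(func() bool {
            return kubeletHealthCheck(kubeletHealthCheckURL)
        }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
    }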
@@ -590,7 +624,12 @@ func runCPUManagerTests(f *framework.Framework) {
 		}
 
 		// Enable CPU Manager in the kubelet.
-		oldCfg = configureCPUManagerInKubelet(f, string(cpumanager.PolicyStatic), true, cpuset.CPUSet{}, false, nil)
+		oldCfg = configureCPUManagerInKubelet(f, &cpuManagerKubeletArguments{
+			policyName:         string(cpumanager.PolicyStatic),
+			enableCPUManager:   true,
+			reservedSystemCPUs: cpuset.CPUSet{},
+			cleanStateFile:     true,
+		})
 
 		ginkgo.By("running a non-Gu pod")
 		runNonGuPodTest(f, cpuCap)
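Note the new call shape: with a *cpuManagerKubeletArguments parameter, call
sites name only the fields they care about and every omitted field keeps its
Go zero value (false, empty cpuset, nil map), and turning the CPU manager on
becomes an explicit enableCPUManager: true rather than a value hard-coded
inside the helper.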
@@ -657,7 +696,11 @@ func runCPUManagerTests(f *framework.Framework) {
 		waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
 
 		ginkgo.By("enable cpu manager in kubelet without delete state file")
-		configureCPUManagerInKubelet(f, string(cpumanager.PolicyStatic), false, cpuset.CPUSet{}, false, nil)
+		configureCPUManagerInKubelet(f, &cpuManagerKubeletArguments{
+			policyName:         string(cpumanager.PolicyStatic),
+			enableCPUManager:   true,
+			reservedSystemCPUs: cpuset.CPUSet{},
+		})
 
 		ginkgo.By("wait for the deleted pod to be cleaned up from the state file")
 		waitForStateFileCleanedUp()
@@ -681,13 +724,20 @@ func runCPUManagerTests(f *framework.Framework) {
 
 		framework.Logf("SMT level %d", smtLevel)
 
-		cleanStateFile := true
 		// TODO: we assume the first available CPUID is 0, which is pretty fair, but we should probably
 		// check what we do have in the node.
 		cpuPolicyOptions := map[string]string{
 			cpumanager.FullPCPUsOnlyOption: "true",
 		}
-		oldCfg = configureCPUManagerInKubelet(f, string(cpumanager.PolicyStatic), cleanStateFile, cpuset.NewCPUSet(0), true, cpuPolicyOptions)
+		oldCfg = configureCPUManagerInKubelet(f,
+			&cpuManagerKubeletArguments{
+				policyName:              string(cpumanager.PolicyStatic),
+				enableCPUManager:        true,
+				cleanStateFile:          true,
+				reservedSystemCPUs:      cpuset.NewCPUSet(0),
+				enableCPUManagerOptions: true,
+				options:                 cpuPolicyOptions,
+			})
 
 		// the order between negative and positive doesn't really matter
 		runSMTAlignmentNegativeTests(f)
@@ -695,7 +745,7 @@ func runCPUManagerTests(f *framework.Framework) {
 	})
 
 	ginkgo.AfterEach(func() {
-		setOldKubeletConfig(f, oldCfg)
+		setOldKubeletConfig(oldCfg)
 	})
 }
 
@@ -1,3 +1,6 @@
+//go:build linux
+// +build linux
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
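The new file header uses the paired build-constraint syntax: //go:build is the
Go 1.17+ form, and the legacy // +build line keeps older toolchains working.
Both lines must express the same constraint and sit above the package clause,
separated from it by a blank line, as in this sketch:

    //go:build linux
    // +build linux

    // Restricts compilation of this test file to Linux targets.
    package e2enode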
@@ -40,6 +43,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 	"k8s.io/utils/pointer"
 
 	"github.com/onsi/ginkgo"
@@ -228,11 +232,24 @@ func getUpdatedKubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration, params
 }
 
 func updateKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) {
-	// remove the state file
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(cfg))
 	deleteMemoryManagerStateFile()
 
-	// Update the Kubelet configuration
-	framework.ExpectNoError(setKubeletConfiguration(f, cfg))
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(func() bool {
@@ -264,7 +281,7 @@ func getAllNUMANodes() []int {
 }
 
 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
+var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager]", func() {
 	// TODO: add more complex tests that will include interaction between CPUManager, MemoryManager and TopologyManager
 	var (
 		allNUMANodes []int
@@ -305,53 +322,7 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
 		framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.ToSlice())
 	}
 
-	ginkgo.BeforeEach(func() {
-		if isMultiNUMASupported == nil {
-			isMultiNUMASupported = pointer.BoolPtr(isMultiNUMA())
-		}
-
-		if is2MiHugepagesSupported == nil {
-			is2MiHugepagesSupported = pointer.BoolPtr(isHugePageAvailable(hugepagesSize2M))
-		}
-
-		if len(allNUMANodes) == 0 {
-			allNUMANodes = getAllNUMANodes()
-		}
-	})
-
-	// dynamically update the kubelet configuration
-	ginkgo.JustBeforeEach(func() {
-		var err error
-
-		// allocate hugepages
-		if *is2MiHugepagesSupported {
-			hugepagesCount := 256
-			ginkgo.By("Configuring hugepages")
-			gomega.Eventually(func() error {
-				if err := configureHugePages(hugepagesSize2M, hugepagesCount); err != nil {
-					return err
-				}
-				return nil
-			}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
-
-			ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
-
-			// stop the kubelet and wait until the server will restart it automatically
-			stopKubelet()
-
-			// wait until the kubelet health check will fail
-			gomega.Eventually(func() bool {
-				return kubeletHealthCheck(kubeletHealthCheckURL)
-			}, time.Minute, time.Second).Should(gomega.BeFalse())
-
-			restartKubelet(false)
-
-			// wait until the kubelet health check will pass
-			gomega.Eventually(func() bool {
-				return kubeletHealthCheck(kubeletHealthCheckURL)
-			}, 2*time.Minute, 10*time.Second).Should(gomega.BeTrue())
-
-			ginkgo.By("Waiting for hugepages resource to become available on the local node")
-			gomega.Eventually(func() error {
+	waitingForHugepages := func(hugepagesCount int) {
+		gomega.Eventually(func() error {
 			node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 			if err != nil {
@@ -377,9 +348,40 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
 		}, time.Minute, framework.Poll).Should(gomega.BeNil())
 	}
 
+	ginkgo.BeforeEach(func() {
+		if isMultiNUMASupported == nil {
+			isMultiNUMASupported = pointer.BoolPtr(isMultiNUMA())
+		}
+
+		if is2MiHugepagesSupported == nil {
+			is2MiHugepagesSupported = pointer.BoolPtr(isHugePageAvailable(hugepagesSize2M))
+		}
+
+		if len(allNUMANodes) == 0 {
+			allNUMANodes = getAllNUMANodes()
+		}
+	})
+
+	// dynamically update the kubelet configuration
+	ginkgo.JustBeforeEach(func() {
+		var err error
+		hugepagesCount := 8
+
+		// allocate hugepages
+		if *is2MiHugepagesSupported {
+			ginkgo.By("Configuring hugepages")
+			gomega.Eventually(func() error {
+				return configureHugePages(hugepagesSize2M, hugepagesCount)
+			}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
+		}
+
 		// get the old kubelet config
-		oldCfg, err = getCurrentKubeletConfig()
-		framework.ExpectNoError(err)
+		if oldCfg == nil {
+			gomega.Eventually(func() error {
+				oldCfg, err = e2enodekubelet.GetCurrentKubeletConfigFromFile()
+				return err
+			}, 5*time.Minute, 15*time.Second).Should(gomega.BeNil())
+		}
 
 		// update the kubelet config with new parameters
 		newCfg := getUpdatedKubeletConfig(oldCfg, kubeParams)
@@ -387,8 +389,11 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
 
 		// request hugepages resources under the container
 		if *is2MiHugepagesSupported {
+			ginkgo.By("Waiting for hugepages resource to become available on the local node")
+			waitingForHugepages(hugepagesCount)
+
 			for i := 0; i < len(ctnParams); i++ {
-				ctnParams[i].hugepages2Mi = "128Mi"
+				ctnParams[i].hugepages2Mi = "8Mi"
 			}
 		}
 
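The shrunken hugepages numbers stay self-consistent: hugepagesCount is now 8,
so the JustBeforeEach pre-allocates 8 x 2Mi = 16Mi of hugepages, and each
container requests 8Mi of hugepages-2Mi, half the pool. The old values were
256 pages (512Mi allocated) with a 128Mi request per container.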
@@ -404,20 +409,21 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
 		}
 
 		// release hugepages
+		if *is2MiHugepagesSupported {
+			ginkgo.By("Releasing allocated hugepages")
 			gomega.Eventually(func() error {
 				return configureHugePages(hugepagesSize2M, 0)
 			}, 90*time.Second, 15*time.Second).ShouldNot(gomega.HaveOccurred(), "failed to release hugepages")
+		}
 
-		// update the kubelet config with old values
+		// update the kubelet config with old parameters
+		if oldCfg != nil {
 			updateKubeletConfig(f, oldCfg)
+		}
 
-		// wait until the kubelet health check will pass and will continue to pass for specified period of time
-		gomega.Eventually(func() bool {
-			return kubeletHealthCheck(kubeletHealthCheckURL)
-		}, time.Minute, 10*time.Second).Should(gomega.BeTrue())
-		gomega.Consistently(func() bool {
-			return kubeletHealthCheck(kubeletHealthCheckURL)
-		}, time.Minute, 10*time.Second).Should(gomega.BeTrue())
+		if *is2MiHugepagesSupported {
+			waitingForHugepages(0)
+		}
 	})
 
 	ginkgo.Context("with static policy", func() {
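configureHugePages itself is called but not shown in this diff. On Linux,
pre-allocating 2Mi hugepages typically means writing the desired page count to
a sysfs knob, so a hypothetical stand-in (an assumption about the helper, not
its actual implementation) could look like:

    package e2enode

    import (
        "fmt"
        "os"
    )

    // configureHugePagesSketch writes the requested number of hugepages of the
    // given size (in kB) to the kernel's sysfs interface, e.g.
    // /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages for 2Mi pages.
    func configureHugePagesSketch(sizeKB, numPages int) error {
        path := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%dkB/nr_hugepages", sizeKB)
        return os.WriteFile(path, []byte(fmt.Sprintf("%d", numPages)), 0o600)
    }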
@@ -34,8 +34,10 @@ import (
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -183,7 +185,24 @@ func runTest(f *framework.Framework) error {
 	defer destroyTemporaryCgroupsForReservation(cgroupManager)
 	defer func() {
 		if oldCfg != nil {
-			framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
+			// Update the Kubelet configuration.
+			ginkgo.By("Stopping the kubelet")
+			startKubelet := stopKubelet()
+
+			// wait until the kubelet health check will fail
+			gomega.Eventually(func() bool {
+				return kubeletHealthCheck(kubeletHealthCheckURL)
+			}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+			framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg))
+
+			ginkgo.By("Starting the kubelet")
+			startKubelet()
+
+			// wait until the kubelet health check will succeed
+			gomega.Eventually(func() bool {
+				return kubeletHealthCheck(kubeletHealthCheckURL)
+			}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 		}
 	}()
 	if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
@@ -193,7 +212,25 @@ func runTest(f *framework.Framework) error {
 	// Change existing kubelet configuration
 	setDesiredConfiguration(newCfg)
 	// Set the new kubelet configuration.
-	err = setKubeletConfiguration(f, newCfg)
+	// Update the Kubelet configuration.
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+
 	if err != nil {
 		return err
 	}
@@ -24,10 +24,12 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 	"k8s.io/kubernetes/test/e2e_node/perf/workloads"
 
 	"github.com/onsi/ginkgo"
@@ -46,7 +48,24 @@ func makeNodePerfPod(w workloads.NodePerfWorkload) *v1.Pod {
 
 func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) {
 	if cfg != nil {
-		framework.ExpectNoError(setKubeletConfiguration(f, cfg))
+		// Update the Kubelet configuration.
+		ginkgo.By("Stopping the kubelet")
+		startKubelet := stopKubelet()
+
+		// wait until the kubelet health check will fail
+		gomega.Eventually(func() bool {
+			return kubeletHealthCheck(kubeletHealthCheckURL)
+		}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+		framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(cfg))
+
+		ginkgo.By("Starting the kubelet")
+		startKubelet()
+
+		// wait until the kubelet health check will succeed
+		gomega.Eventually(func() bool {
+			return kubeletHealthCheck(kubeletHealthCheckURL)
+		}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 	}
 
 	// Wait for the Kubelet to be ready.
@@ -43,6 +43,7 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -494,7 +495,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 
 }
 
-func podresourcesGetAllocatableResourcesTests(f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
+func podresourcesGetAllocatableResourcesTests(cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
 	ginkgo.By("checking the devices known to the kubelet")
 	resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
 	framework.ExpectNoErrorWithOffset(1, err)
@@ -553,7 +554,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		oldCfg := configurePodResourcesInKubelet(f, true, reservedSystemCPUs)
 		defer func() {
 			// restore kubelet config
-			setOldKubeletConfig(f, oldCfg)
+			setOldKubeletConfig(oldCfg)
 
 			// Delete state file to allow repeated runs
 			deleteStateFile()
@@ -577,7 +578,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			ginkgo.By("checking List()")
 			podresourcesListTests(f, cli, sd)
 			ginkgo.By("checking GetAllocatableResources()")
-			podresourcesGetAllocatableResourcesTests(f, cli, sd, onlineCPUs, reservedSystemCPUs)
+			podresourcesGetAllocatableResourcesTests(cli, sd, onlineCPUs, reservedSystemCPUs)
 
 		})
 
@@ -589,7 +590,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		oldCfg := enablePodResourcesFeatureGateInKubelet(f)
 		defer func() {
 			// restore kubelet config
-			setOldKubeletConfig(f, oldCfg)
+			setOldKubeletConfig(oldCfg)
 
 			// Delete state file to allow repeated runs
 			deleteStateFile()
@@ -612,7 +613,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 
 			// intentionally passing empty cpuset instead of onlineCPUs because with none policy
 			// we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static
-			podresourcesGetAllocatableResourcesTests(f, cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{})
+			podresourcesGetAllocatableResourcesTests(cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{})
 		})
 
 	})
@@ -635,7 +636,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		oldCfg := configurePodResourcesInKubelet(f, true, reservedSystemCPUs)
 		defer func() {
 			// restore kubelet config
-			setOldKubeletConfig(f, oldCfg)
+			setOldKubeletConfig(oldCfg)
 
 			// Delete state file to allow repeated runs
 			deleteStateFile()
@@ -649,7 +650,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		defer conn.Close()
 
 		podresourcesListTests(f, cli, nil)
-		podresourcesGetAllocatableResourcesTests(f, cli, nil, onlineCPUs, reservedSystemCPUs)
+		podresourcesGetAllocatableResourcesTests(cli, nil, onlineCPUs, reservedSystemCPUs)
 	})
 
 	ginkgo.It("should return the expected responses with cpumanager none policy", func() {
@@ -660,7 +661,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		oldCfg := enablePodResourcesFeatureGateInKubelet(f)
 		defer func() {
 			// restore kubelet config
-			setOldKubeletConfig(f, oldCfg)
+			setOldKubeletConfig(oldCfg)
 
 			// Delete state file to allow repeated runs
 			deleteStateFile()
@@ -675,7 +676,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 
 		// intentionally passing empty cpuset instead of onlineCPUs because with none policy
 		// we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static
-		podresourcesGetAllocatableResourcesTests(f, cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{})
+		podresourcesGetAllocatableResourcesTests(cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{})
 	})
 
 	ginkgo.It("should return the expected error with the feature gate disabled", func() {
@@ -717,7 +718,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		oldCfg := configurePodResourcesInKubelet(f, true, reservedSystemCPUs)
 		defer func() {
 			// restore kubelet config
-			setOldKubeletConfig(f, oldCfg)
+			setOldKubeletConfig(oldCfg)
 
 			// Delete state file to allow repeated runs
 			deleteStateFile()
@@ -829,7 +830,23 @@ func configurePodResourcesInKubelet(f *framework.Framework, cleanStateFile bool,
 		}
 	}
 	// Update the Kubelet configuration.
-	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(func() bool {
@@ -849,8 +866,23 @@ func enablePodResourcesFeatureGateInKubelet(f *framework.Framework) (oldCfg *kub
 		newCfg.FeatureGates = make(map[string]bool)
 	}
 
-	// Update the Kubelet configuration.
-	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
+
+	// wait until the kubelet health check will succeed
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(func() bool {
@@ -27,23 +27,23 @@ import (
 	"sync"
 	"time"
 
-	testutils "k8s.io/kubernetes/test/utils"
-
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
+	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -237,8 +237,18 @@ func configureTopologyManagerInKubelet(f *framework.Framework, oldCfg *kubeletco
 	// Dump the config -- debug
 	framework.Logf("New kubelet config is %s", *newCfg)
 
-	// Update the Kubelet configuration.
-	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+	ginkgo.By("Stopping the kubelet")
+	startKubelet := stopKubelet()
+
+	// wait until the kubelet health check will fail
+	gomega.Eventually(func() bool {
+		return kubeletHealthCheck(kubeletHealthCheckURL)
+	}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+	framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+	ginkgo.By("Starting the kubelet")
+	startKubelet()
 
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(func() bool {
@@ -947,7 +957,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 
 	ginkgo.AfterEach(func() {
 		// restore kubelet config
-		setOldKubeletConfig(f, oldCfg)
+		setOldKubeletConfig(oldCfg)
 	})
 }
 
@@ -50,10 +50,12 @@ import (
 	kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
 	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/util"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"
@@ -157,12 +159,7 @@ func getCurrentKubeletConfig() (*kubeletconfig.KubeletConfiguration, error) {
 func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *kubeletconfig.KubeletConfiguration)) {
 	var oldCfg *kubeletconfig.KubeletConfiguration
 	ginkgo.BeforeEach(func() {
-		configEnabled, err := isKubeletConfigEnabled(f)
-		framework.ExpectNoError(err)
-		framework.ExpectEqual(configEnabled, true, "The Dynamic Kubelet Configuration feature is not enabled.\n"+
-			"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n"+
-			"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
-		oldCfg, err = getCurrentKubeletConfig()
+		oldCfg, err := getCurrentKubeletConfig()
 		framework.ExpectNoError(err)
 		newCfg := oldCfg.DeepCopy()
 		updateFunction(newCfg)
@@ -170,12 +167,45 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini
 			return
 		}
 
-		framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
+		// Update the Kubelet configuration.
+		ginkgo.By("Stopping the kubelet")
+		startKubelet := stopKubelet()
+
+		// wait until the kubelet health check will fail
+		gomega.Eventually(func() bool {
+			return kubeletHealthCheck(kubeletHealthCheckURL)
+		}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+		framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg))
+
+		ginkgo.By("Starting the kubelet")
+		startKubelet()
+
+		// wait until the kubelet health check will succeed
+		gomega.Eventually(func() bool {
+			return kubeletHealthCheck(kubeletHealthCheckURL)
+		}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 	})
 	ginkgo.AfterEach(func() {
 		if oldCfg != nil {
-			err := setKubeletConfiguration(f, oldCfg)
-			framework.ExpectNoError(err)
+			// Update the Kubelet configuration.
+			ginkgo.By("Stopping the kubelet")
+			startKubelet := stopKubelet()
+
+			// wait until the kubelet health check will fail
+			gomega.Eventually(func() bool {
+				return kubeletHealthCheck(kubeletHealthCheckURL)
+			}, time.Minute, time.Second).Should(gomega.BeFalse())
+
+			framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg))
+
+			ginkgo.By("Starting the kubelet")
+			startKubelet()
+
+			// wait until the kubelet health check will succeed
+			gomega.Eventually(func() bool {
+				return kubeletHealthCheck(kubeletHealthCheckURL)
+			}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 		}
 	})
 }
@@ -193,64 +223,6 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
 	return v, nil
 }
 
-// Creates or updates the configmap for KubeletConfiguration, waits for the Kubelet to restart
-// with the new configuration. Returns an error if the configuration after waiting for restartGap
-// doesn't match what you attempted to set, or if the dynamic configuration feature is disabled.
-// You should only call this from serial tests.
-func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.KubeletConfiguration) error {
-	const (
-		restartGap   = 40 * time.Second
-		pollInterval = 5 * time.Second
-	)
-
-	// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure
-	if configEnabled, err := isKubeletConfigEnabled(f); err != nil {
-		return err
-	} else if !configEnabled {
-		return fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
-			"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
-			"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
-	}
-
-	// create the ConfigMap with the new configuration
-	cm, err := createConfigMap(f, kubeCfg)
-	if err != nil {
-		return err
-	}
-
-	// create the reference and set Node.Spec.ConfigSource
-	src := &v1.NodeConfigSource{
-		ConfigMap: &v1.ConfigMapNodeConfigSource{
-			Namespace:        "kube-system",
-			Name:             cm.Name,
-			KubeletConfigKey: "kubelet",
-		},
-	}
-
-	// set the source, retry a few times in case we are competing with other writers
-	gomega.Eventually(func() error {
-		if err := setNodeConfigSource(f, src); err != nil {
-			return err
-		}
-		return nil
-	}, time.Minute, time.Second).Should(gomega.BeNil())
-
-	// poll for new config, for a maximum wait of restartGap
-	gomega.Eventually(func() error {
-		newKubeCfg, err := getCurrentKubeletConfig()
-		if err != nil {
-			return fmt.Errorf("failed trying to get current Kubelet config, will retry, error: %v", err)
-		}
-		if !apiequality.Semantic.DeepEqual(*kubeCfg, *newKubeCfg) {
-			return fmt.Errorf("still waiting for new configuration to take effect, will continue to watch /configz")
-		}
-		klog.Infof("new configuration has taken effect")
-		return nil
-	}, restartGap, pollInterval).Should(gomega.BeNil())
-
-	return nil
-}
-
 // sets the current node's configSource, this should only be called from Serial tests
 func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) error {
 	// since this is a serial test, we just get the node, change the source, and then update it
@@ -275,16 +247,6 @@ func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) er
 	return nil
 }
 
-// creates a configmap containing kubeCfg in kube-system namespace
-func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
-	cmap := newKubeletConfigMap("testcfg", internalKC)
-	cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cmap, metav1.CreateOptions{})
-	if err != nil {
-		return nil, err
-	}
-	return cmap, nil
-}
-
 // constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
 func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
 	data, err := kubeletconfigcodec.EncodeKubeletConfig(internalKC, kubeletconfigv1beta1.SchemeGroupVersion)
@@ -447,8 +409,9 @@ func stopKubelet() func() {
 	framework.ExpectNoError(err, "Failed to stop kubelet with systemctl: %v, %s", err, string(stdout))
 
 	return func() {
-		stdout, err := exec.Command("sudo", "systemctl", "start", kubeletServiceName).CombinedOutput()
-		framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %s", err, string(stdout))
+		// we should restart service, otherwise the transient service start will fail
+		stdout, err := exec.Command("sudo", "systemctl", "restart", kubeletServiceName).CombinedOutput()
+		framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
 	}
 }
 
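The closing hunk's switch from "systemctl start" to "systemctl restart" is
deliberate: per the new comment, the kubelet in this suite runs as a transient
systemd service, and a plain start of such a unit fails after it has stopped,
while restart brings it back up. Note also that the restore closure now passes
stdout (a []byte) with %v rather than converting it via string(stdout).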