Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 20:53:33 +00:00)
e2e node: provide tests for memory manager pod resources metrics

- verify memory manager data returned by `GetAllocatableResources`
- verify pod container memory manager data

Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
This commit is contained in: commit 681905706d (parent 03830db82d)
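All of the new tests talk to the kubelet pod resources gRPC endpoint. Below is a minimal sketch of that call pattern (resolve the local socket, build a v1 client, ask for allocatable resources), using only helpers that appear in the diff; the package wrapper, the defaultPodResources* constant values, and the dumpAllocatableMemory name are assumptions for illustration, not part of this commit.

package main // hypothetical wrapper; the real calls live in the node e2e test package

import (
	"context"
	"fmt"
	"time"

	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	"k8s.io/kubernetes/pkg/kubelet/util"
)

// Assumed values; the e2e suite defines its own constants with these names.
const (
	defaultPodResourcesPath    = "/var/lib/kubelet/pod-resources"
	defaultPodResourcesTimeout = 10 * time.Second
	defaultPodResourcesMaxSize = 1024 * 1024 * 16
)

func dumpAllocatableMemory() error {
	// Resolve the local gRPC endpoint of the kubelet pod resources socket.
	endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
	if err != nil {
		return err
	}

	// Open a v1 client; the returned connection must be closed by the caller.
	cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
	if err != nil {
		return err
	}
	defer conn.Close()

	// With the KubeletPodResourcesGetAllocatable feature gate enabled, the
	// response also carries the memory blocks managed by the memory manager.
	resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
	if err != nil {
		return err
	}

	for _, m := range resp.Memory {
		fmt.Printf("memory type %q, size %d, topology %v\n", m.MemoryType, m.Size_, m.Topology)
	}
	return nil
}

func main() {
	if err := dumpAllocatableMemory(); err != nil {
		fmt.Println(err)
	}
}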
@@ -31,9 +31,12 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
+	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
+	"k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -153,13 +156,33 @@ func getMemoryManagerState() (*state.MemoryManagerCheckpoint, error) {
 	return memoryManagerCheckpoint, nil
 }
 
+func getAllocatableMemoryFromStateFile(s *state.MemoryManagerCheckpoint) []state.Block {
+	var allocatableMemory []state.Block
+	for numaNodeID, numaNodeState := range s.MachineState {
+		for resourceName, memoryTable := range numaNodeState.MemoryMap {
+			if memoryTable.Allocatable == 0 {
+				continue
+			}
+
+			block := state.Block{
+				NUMAAffinity: []int{numaNodeID},
+				Type:         resourceName,
+				Size:         memoryTable.Allocatable,
+			}
+			allocatableMemory = append(allocatableMemory, block)
+		}
+	}
+	return allocatableMemory
+}
+
 type kubeletParams struct {
-	memoryManagerFeatureGate bool
-	memoryManagerPolicy      string
-	systemReservedMemory     []kubeletconfig.MemoryReservation
-	systemReserved           map[string]string
-	kubeReserved             map[string]string
-	evictionHard             map[string]string
+	memoryManagerFeatureGate              bool
+	podResourcesGetAllocatableFeatureGate bool
+	memoryManagerPolicy                   string
+	systemReservedMemory                  []kubeletconfig.MemoryReservation
+	systemReserved                        map[string]string
+	kubeReserved                          map[string]string
+	evictionHard                          map[string]string
 }
 
 func getUpdatedKubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration, params *kubeletParams) *kubeletconfig.KubeletConfiguration {
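The new getAllocatableMemoryFromStateFile helper flattens the memory manager checkpoint (per NUMA node, per resource type) into state.Block values, skipping entries whose allocatable size is zero; the pod resources tests below compare these blocks against the API response. A hedged usage fragment, reusing only helpers shown in this diff (the Logf call is illustrative, not part of the commit):

	// Illustrative fragment: read the checkpoint written by the memory manager
	// and log the allocatable blocks it contains.
	stateData, err := getMemoryManagerState()
	framework.ExpectNoError(err)

	for _, block := range getAllocatableMemoryFromStateFile(stateData) {
		framework.Logf("NUMA %v: %s => %d bytes allocatable", block.NUMAAffinity, block.Type, block.Size)
	}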
@@ -169,6 +192,8 @@ func getUpdatedKubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration, params
 		newCfg.FeatureGates = map[string]bool{}
 	}
 	newCfg.FeatureGates["MemoryManager"] = params.memoryManagerFeatureGate
+	newCfg.FeatureGates["KubeletPodResourcesGetAllocatable"] = params.podResourcesGetAllocatableFeatureGate
+
 	newCfg.MemoryManagerPolicy = params.memoryManagerPolicy
 
 	// update system-reserved
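For reference, with the default test parameters both gates end up enabled in the generated kubelet configuration, roughly as sketched here (an assumption-laden sketch: the static policy name is taken to be "Static", and the reserved-memory fields are omitted):

	cfg := &kubeletconfig.KubeletConfiguration{
		FeatureGates: map[string]bool{
			"MemoryManager":                     true,
			"KubeletPodResourcesGetAllocatable": true,
		},
		// Populated from params.memoryManagerPolicy by getUpdatedKubeletConfig.
		MemoryManagerPolicy: "Static",
	}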
@@ -257,14 +282,15 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
 
 	f := framework.NewDefaultFramework("memory-manager-test")
 
-	memoryQuantatity := resource.MustParse("1100Mi")
+	memoryQuantity := resource.MustParse("1100Mi")
 	defaultKubeParams := &kubeletParams{
 		memoryManagerFeatureGate: true,
+		podResourcesGetAllocatableFeatureGate: true,
 		systemReservedMemory: []kubeletconfig.MemoryReservation{
 			{
 				NumaNode: 0,
 				Limits: v1.ResourceList{
-					resourceMemory: memoryQuantatity,
+					resourceMemory: memoryQuantity,
 				},
 			},
 		},
@@ -367,12 +393,14 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
 			}
 		}
 
-		testPod = makeMemoryManagerPod(ctnParams[0].ctnName, initCtnParams, ctnParams)
+		if len(ctnParams) > 0 {
+			testPod = makeMemoryManagerPod(ctnParams[0].ctnName, initCtnParams, ctnParams)
+		}
 	})
 
 	ginkgo.JustAfterEach(func() {
 		// delete the test pod
-		if testPod.Name != "" {
+		if testPod != nil && testPod.Name != "" {
 			f.PodClient().DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
 		}
 
@@ -407,6 +435,48 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
 		initCtnParams = []memoryManagerCtnAttributes{}
 	})
 
+	// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
+	ginkgo.It("should report memory data during request to pod resources GetAllocatableResources", func() {
+		endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+		framework.ExpectNoError(err)
+
+		cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+		framework.ExpectNoError(err)
+		defer conn.Close()
+
+		resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+		framework.ExpectNoError(err)
+		gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
+
+		stateData, err := getMemoryManagerState()
+		framework.ExpectNoError(err)
+
+		stateAllocatableMemory := getAllocatableMemoryFromStateFile(stateData)
+		framework.ExpectEqual(len(resp.Memory), len(stateAllocatableMemory))
+
+		for _, containerMemory := range resp.Memory {
+			gomega.Expect(containerMemory.Topology).NotTo(gomega.BeNil())
+			framework.ExpectEqual(len(containerMemory.Topology.Nodes), 1)
+			gomega.Expect(containerMemory.Topology.Nodes[0]).NotTo(gomega.BeNil())
+
+			numaNodeID := int(containerMemory.Topology.Nodes[0].ID)
+			for _, numaStateMemory := range stateAllocatableMemory {
+				framework.ExpectEqual(len(numaStateMemory.NUMAAffinity), 1)
+				if numaNodeID != numaStateMemory.NUMAAffinity[0] {
+					continue
+				}
+
+				if containerMemory.MemoryType != string(numaStateMemory.Type) {
+					continue
+				}
+
+				gomega.Expect(containerMemory.Size_).To(gomega.BeEquivalentTo(numaStateMemory.Size))
+			}
+		}
+
+		gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
+	})
+
 	ginkgo.When("guaranteed pod has init and app containers", func() {
 		ginkgo.BeforeEach(func() {
 			// override containers parameters
@@ -499,6 +569,48 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
 			verifyMemoryPinning(testPod2, []int{0})
 		})
 
+		// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
+		ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func() {
+			ginkgo.By("Running the test pod and the test pod 2")
+			testPod = f.PodClient().CreateSync(testPod)
+
+			ginkgo.By("Running the test pod 2")
+			testPod2 = f.PodClient().CreateSync(testPod2)
+
+			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+			framework.ExpectNoError(err)
+
+			cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+			framework.ExpectNoError(err)
+			defer conn.Close()
+
+			resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+			framework.ExpectNoError(err)
+
+			for _, pod := range []*v1.Pod{testPod, testPod2} {
+				for _, podResource := range resp.PodResources {
+					if podResource.Name != pod.Name {
+						continue
+					}
+
+					for _, c := range pod.Spec.Containers {
+						for _, containerResource := range podResource.Containers {
+							if containerResource.Name != c.Name {
+								continue
+							}
+
+							for _, containerMemory := range containerResource.Memory {
+								q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)]
+								value, ok := q.AsInt64()
+								gomega.Expect(ok).To(gomega.BeTrue())
+								gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_))
+							}
+						}
+					}
+				}
+			}
+		})
+
 		ginkgo.JustAfterEach(func() {
 			// delete the test pod 2
 			if testPod2.Name != "" {
@@ -604,6 +716,46 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
 			}
 		})
 
+		// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
+		ginkgo.It("should not report any memory data during request to pod resources GetAllocatableResources", func() {
+			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+			framework.ExpectNoError(err)
+
+			cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+			framework.ExpectNoError(err)
+			defer conn.Close()
+
+			resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+			framework.ExpectNoError(err)
+
+			gomega.Expect(resp.Memory).To(gomega.BeEmpty())
+		})
+
+		// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
+		ginkgo.It("should not report any memory data during request to pod resources List", func() {
+			testPod = f.PodClient().CreateSync(testPod)
+
+			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+			framework.ExpectNoError(err)
+
+			cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+			framework.ExpectNoError(err)
+			defer conn.Close()
+
+			resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+			framework.ExpectNoError(err)
+
+			for _, podResource := range resp.PodResources {
+				if podResource.Name != testPod.Name {
+					continue
+				}
+
+				for _, containerResource := range podResource.Containers {
+					gomega.Expect(containerResource.Memory).To(gomega.BeEmpty())
+				}
+			}
+		})
+
 		ginkgo.It("should succeed to start the pod", func() {
 			testPod = f.PodClient().CreateSync(testPod)
 