Merge pull request #101030 from cynepco3hahue/pod_resources_memory_interface

Extend the pod resources API response to return the information from the memory manager
Kubernetes Prow Robot 2021-06-22 06:35:58 -07:00 committed by GitHub
commit 985ac8ae50
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 871 additions and 55 deletions
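
In short, the v1 pod resources API gains a repeated ContainerMemory field on both the List and GetAllocatableResources responses, populated from the kubelet memory manager. A minimal client sketch (not part of this commit; it assumes the default pod-resources socket directory and arbitrary timeout/message-size values, and reuses the GetV1Client and LocalEndpoint helpers that the e2e tests below also use):

package main

import (
	"context"
	"fmt"
	"time"

	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	"k8s.io/kubernetes/pkg/kubelet/util"
)

func main() {
	// Assumption: the kubelet exposes the pod-resources socket under the
	// default directory /var/lib/kubelet/pod-resources.
	endpoint, err := util.LocalEndpoint("/var/lib/kubelet/pod-resources", podresources.Socket)
	if err != nil {
		panic(err)
	}
	// The 10s timeout and 16MiB max message size are example values.
	cli, conn, err := podresources.GetV1Client(endpoint, 10*time.Second, 1024*1024*16)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// New in this change: the response carries memory and hugepages blocks;
	// it stays empty when the memory manager runs with the "none" policy.
	resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
	if err != nil {
		panic(err)
	}
	for _, m := range resp.Memory {
		fmt.Printf("memoryType=%s size=%d topology=%v\n", m.MemoryType, m.Size_, m.Topology)
	}
}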

View File

@@ -32,15 +32,17 @@ type v1PodResourcesServer struct {
podsProvider PodsProvider
devicesProvider DevicesProvider
cpusProvider CPUsProvider
memoryProvider MemoryProvider
}

// NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
-func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider) v1.PodResourcesListerServer {
+func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider, memoryProvider MemoryProvider) v1.PodResourcesListerServer {
return &v1PodResourcesServer{
podsProvider: podsProvider,
devicesProvider: devicesProvider,
cpusProvider: cpusProvider,
memoryProvider: memoryProvider,
}
}

@@ -65,6 +67,7 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
Name: container.Name,
Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
}
}
podResources[i] = &pRes

@@ -90,5 +93,6 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
return &v1.AllocatableResourcesResponse{
Devices: p.devicesProvider.GetAllocatableDevices(),
CpuIds: p.cpusProvider.GetAllocatableCPUs(),
Memory: p.memoryProvider.GetAllocatableMemory(),
}, nil
}

View File

@@ -31,6 +31,7 @@ import (
pkgfeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
)

func TestListPodResourcesV1(t *testing.T) {

@@ -50,11 +51,25 @@ func TestListPodResourcesV1(t *testing.T) {
cpus := []int64{12, 23, 30}
memory := []*podresourcesapi.ContainerMemory{
{
MemoryType: "memory",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
{
MemoryType: "hugepages-1Gi",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
for _, tc := range []struct {
desc string
pods []*v1.Pod
devices []*podresourcesapi.ContainerDevices
cpus []int64
memory []*podresourcesapi.ContainerMemory
expectedResponse *podresourcesapi.ListPodResourcesResponse
}{
{
@@ -62,6 +77,7 @@ func TestListPodResourcesV1(t *testing.T) {
pods: []*v1.Pod{},
devices: []*podresourcesapi.ContainerDevices{},
cpus: []int64{},
memory: []*podresourcesapi.ContainerMemory{},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
},
{

@@ -84,6 +100,7 @@ func TestListPodResourcesV1(t *testing.T) {
},
devices: []*podresourcesapi.ContainerDevices{},
cpus: []int64{},
memory: []*podresourcesapi.ContainerMemory{},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{

@@ -119,6 +136,7 @@ func TestListPodResourcesV1(t *testing.T) {
},
devices: devs,
cpus: cpus,
memory: memory,
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{

@@ -129,6 +147,7 @@ func TestListPodResourcesV1(t *testing.T) {
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
},
},
},
@@ -141,10 +160,12 @@ func TestListPodResourcesV1(t *testing.T) {
m.On("GetPods").Return(tc.pods)
m.On("GetDevices", string(podUID), containerName).Return(tc.devices)
m.On("GetCPUs", string(podUID), containerName).Return(tc.cpus)
m.On("GetMemory", string(podUID), containerName).Return(tc.memory)
m.On("UpdateAllocatedDevices").Return()
m.On("GetAllocatableCPUs").Return(cpuset.CPUSet{})
m.On("GetAllocatableDevices").Return(devicemanager.NewResourceDeviceInstances())
-server := NewV1PodResourcesServer(m, m, m)
+m.On("GetAllocatableMemory").Return([]state.Block{})
+server := NewV1PodResourcesServer(m, m, m, m)
resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{})
if err != nil {
t.Errorf("want err = %v, got %q", nil, err)
@@ -215,16 +236,65 @@ func TestAllocatableResources(t *testing.T) {
allCPUs := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
allMemory := []*podresourcesapi.ContainerMemory{
{
MemoryType: "memory",
Size_: 5368709120,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 0,
},
},
},
},
{
MemoryType: "hugepages-2Mi",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 0,
},
},
},
},
{
MemoryType: "memory",
Size_: 5368709120,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 1,
},
},
},
},
{
MemoryType: "hugepages-2Mi",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 1,
},
},
},
},
}
for _, tc := range []struct {
desc string
allCPUs []int64
allDevices []*podresourcesapi.ContainerDevices
allMemory []*podresourcesapi.ContainerMemory
expectedAllocatableResourcesResponse *podresourcesapi.AllocatableResourcesResponse
}{
{
desc: "no devices, no CPUs",
allCPUs: []int64{},
allDevices: []*podresourcesapi.ContainerDevices{},
allMemory: []*podresourcesapi.ContainerMemory{},
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{},
},
{
@@ -235,6 +305,14 @@ func TestAllocatableResources(t *testing.T) {
CpuIds: allCPUs,
},
},
{
desc: "no devices, no CPUs, all memory",
allCPUs: []int64{},
allDevices: []*podresourcesapi.ContainerDevices{},
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
Memory: allMemory,
},
},
{
desc: "with devices, all CPUs",
allCPUs: allCPUs,
@@ -361,10 +439,12 @@ func TestAllocatableResources(t *testing.T) {
m := new(mockProvider)
m.On("GetDevices", "", "").Return([]*podresourcesapi.ContainerDevices{})
m.On("GetCPUs", "", "").Return([]int64{})
m.On("GetMemory", "", "").Return([]*podresourcesapi.ContainerMemory{})
m.On("UpdateAllocatedDevices").Return()
m.On("GetAllocatableDevices").Return(tc.allDevices)
m.On("GetAllocatableCPUs").Return(tc.allCPUs)
-server := NewV1PodResourcesServer(m, m, m)
+m.On("GetAllocatableMemory").Return(tc.allMemory)
+server := NewV1PodResourcesServer(m, m, m, m)
resp, err := server.GetAllocatableResources(context.TODO(), &podresourcesapi.AllocatableResourcesRequest{})
if err != nil {

View File

@@ -48,6 +48,11 @@ func (m *mockProvider) GetCPUs(podUID, containerName string) []int64 {
return args.Get(0).([]int64)
}

func (m *mockProvider) GetMemory(podUID, containerName string) []*podresourcesv1.ContainerMemory {
args := m.Called(podUID, containerName)
return args.Get(0).([]*podresourcesv1.ContainerMemory)
}

func (m *mockProvider) UpdateAllocatedDevices() {
m.Called()
}

@@ -62,6 +67,11 @@ func (m *mockProvider) GetAllocatableCPUs() []int64 {
return args.Get(0).([]int64)
}

func (m *mockProvider) GetAllocatableMemory() []*podresourcesv1.ContainerMemory {
args := m.Called()
return args.Get(0).([]*podresourcesv1.ContainerMemory)
}

func TestListPodResourcesV1alpha1(t *testing.T) {
podName := "pod-name"
podNamespace := "pod-namespace"

View File

@@ -43,3 +43,10 @@ type CPUsProvider interface {
// GetAllocatableCPUs returns the allocatable (not allocated) CPUs
GetAllocatableCPUs() []int64
}
type MemoryProvider interface {
// GetMemory returns information about the memory assigned to containers
GetMemory(podUID, containerName string) []*podresourcesapi.ContainerMemory
// GetAllocatableMemory returns the allocatable memory from the node
GetAllocatableMemory() []*podresourcesapi.ContainerMemory
}

View File

@@ -112,9 +112,10 @@ type ContainerManager interface {
// GetAllocateResourcesPodAdmitHandler returns an instance of a PodAdmitHandler responsible for allocating pod resources.
GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler

-// Implements the podresources Provider API for CPUs and Devices
+// Implements the podresources Provider API for CPUs, Memory and Devices
podresources.CPUsProvider
podresources.DevicesProvider
podresources.MemoryProvider
}

type NodeConfig struct {

View File

@@ -55,6 +55,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
memorymanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/kubernetes/pkg/kubelet/config"

@@ -1086,6 +1087,22 @@ func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
return []int64{}
}
func (cm *containerManagerImpl) GetMemory(podUID, containerName string) []*podresourcesapi.ContainerMemory {
if cm.memoryManager == nil {
return []*podresourcesapi.ContainerMemory{}
}
return containerMemoryFromBlock(cm.memoryManager.GetMemory(podUID, containerName))
}
func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
if cm.memoryManager == nil {
return []*podresourcesapi.ContainerMemory{}
}
return containerMemoryFromBlock(cm.memoryManager.GetAllocatableMemory())
}
func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
return cm.deviceManager.ShouldResetExtendedResourceCapacity()
}

@@ -1093,3 +1110,25 @@ func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
func (cm *containerManagerImpl) UpdateAllocatedDevices() {
cm.deviceManager.UpdateAllocatedDevices()
}
func containerMemoryFromBlock(blocks []memorymanagerstate.Block) []*podresourcesapi.ContainerMemory {
var containerMemories []*podresourcesapi.ContainerMemory
for _, b := range blocks {
containerMemory := podresourcesapi.ContainerMemory{
MemoryType: string(b.Type),
Size_: b.Size,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{},
},
}
for _, numaNodeID := range b.NUMAAffinity {
containerMemory.Topology.Nodes = append(containerMemory.Topology.Nodes, &podresourcesapi.NUMANode{ID: int64(numaNodeID)})
}
containerMemories = append(containerMemories, &containerMemory)
}
return containerMemories
}
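
For illustration, here is what the conversion above produces for one hypothetical memory manager block spanning two NUMA nodes. This standalone sketch (not part of the commit) inlines the same loop, since containerMemoryFromBlock itself is unexported:

package main

import (
	"fmt"

	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
	memorymanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
)

func main() {
	// Hypothetical block: 1Gi of 2Mi hugepages pinned to NUMA nodes 0 and 1.
	block := memorymanagerstate.Block{
		NUMAAffinity: []int{0, 1},
		Type:         "hugepages-2Mi",
		Size:         1073741824,
	}

	// Same conversion as containerMemoryFromBlock, for a single block.
	containerMemory := podresourcesapi.ContainerMemory{
		MemoryType: string(block.Type),
		Size_:      block.Size,
		Topology: &podresourcesapi.TopologyInfo{
			Nodes: []*podresourcesapi.NUMANode{},
		},
	}
	for _, numaNodeID := range block.NUMAAffinity {
		containerMemory.Topology.Nodes = append(containerMemory.Topology.Nodes, &podresourcesapi.NUMANode{ID: int64(numaNodeID)})
	}

	fmt.Println(containerMemory.String())
	// e.g. &ContainerMemory{MemoryType:hugepages-2Mi,Size_:1073741824,Topology:...}
}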

View File

@@ -139,6 +139,14 @@ func (cm *containerManagerStub) GetAllocatableCPUs() []int64 {
return nil
}
func (cm *containerManagerStub) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
return nil
}
func (cm *containerManagerStub) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
return nil
}
func NewStubContainerManager() ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
}

View File

@@ -243,3 +243,11 @@ func (cm *containerManagerImpl) GetCPUs(_, _ string) []int64 {

func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
return nil
}
func (cm *containerManagerImpl) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
return nil
}
func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
return nil
}

View File

@@ -214,3 +214,16 @@ func (cm *FakeContainerManager) GetAllocatableCPUs() []int64 {
defer cm.Unlock()
return nil
}
func (cm *FakeContainerManager) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetMemory")
return nil
}
func (cm *FakeContainerManager) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
cm.Lock()
defer cm.Unlock()
return nil
}

View File

@@ -61,12 +61,12 @@ func (m *fakeManager) RemoveContainer(containerID string) error {
}

func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-klog.InfoS("Get Topology Hints")
+klog.InfoS("Get Topology Hints", "pod", klog.KObj(pod), "containerName", container.Name)
return map[string][]topologymanager.TopologyHint{}
}

func (m *fakeManager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-klog.InfoS("Get Pod Topology Hints")
+klog.InfoS("Get Pod Topology Hints", "pod", klog.KObj(pod))
return map[string][]topologymanager.TopologyHint{}
}

@@ -74,6 +74,18 @@ func (m *fakeManager) State() state.Reader {
return m.state
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (m *fakeManager) GetAllocatableMemory() []state.Block {
klog.InfoS("Get Allocatable Memory")
return []state.Block{}
}
// GetMemory returns the memory allocated by a container from NUMA nodes
func (m *fakeManager) GetMemory(podUID, containerName string) []state.Block {
klog.InfoS("Get Memory", "podUID", podUID, "containerName", containerName)
return []state.Block{}
}
// NewFakeManager creates empty/fake memory manager
func NewFakeManager() Manager {
return &fakeManager{

View File

@@ -83,6 +83,12 @@ type Manager interface {
// GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory
GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int

// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
GetAllocatableMemory() []state.Block

// GetMemory returns the memory allocated by a container from NUMA nodes
GetMemory(podUID, containerName string) []state.Block
}

type manager struct {
@@ -115,6 +121,9 @@ type manager struct {
// stateFileDirectory holds the directory where the state file for checkpoints is held.
stateFileDirectory string

// allocatableMemory holds the allocatable memory for each NUMA node
allocatableMemory []state.Block
}

var _ Manager = &manager{}

@@ -173,6 +182,8 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
return err
}

m.allocatableMemory = m.policy.GetAllocatableMemory(m.state)

return nil
}
@@ -184,7 +195,7 @@ func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID
m.containerMap.Add(string(pod.UID), container.Name, containerID)
}

-// GetMemory provides NUMA nodes that used to allocate the container memory
+// GetMemoryNUMANodes provides NUMA nodes that used to allocate the container memory
func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int {
// Get NUMA node affinity of blocks assigned to the container during Allocate()
numaNodes := sets.NewInt()

@@ -407,3 +418,13 @@ func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatab
return reservedMemoryConverted, nil
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (m *manager) GetAllocatableMemory() []state.Block {
return m.allocatableMemory
}
// GetMemory returns the memory allocated by a container from NUMA nodes
func (m *manager) GetMemory(podUID, containerName string) []state.Block {
return m.state.GetMemoryBlocks(podUID, containerName)
}

View File

@@ -112,6 +112,11 @@ func (p *mockPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string]
return nil
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (p *mockPolicy) GetAllocatableMemory(s state.State) []state.Block {
return []state.Block{}
}
type mockRuntimeService struct {
err error
}

View File

@@ -41,4 +41,6 @@ type Policy interface {
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
GetAllocatableMemory(s state.State) []state.Block
}

View File

@@ -66,3 +66,8 @@ func (p *none) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Contai

func (p *none) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
return nil
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (p *none) GetAllocatableMemory(s state.State) []state.Block {
return []state.Block{}
}

View File

@@ -767,3 +767,24 @@ func findBestHint(hints []topologymanager.TopologyHint) *topologymanager.Topolog
}
return &bestHint
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (p *staticPolicy) GetAllocatableMemory(s state.State) []state.Block {
var allocatableMemory []state.Block
machineState := s.GetMachineState()
for numaNodeID, numaNodeState := range machineState {
for resourceName, memoryTable := range numaNodeState.MemoryMap {
if memoryTable.Allocatable == 0 {
continue
}
block := state.Block{
NUMAAffinity: []int{numaNodeID},
Type: resourceName,
Size: memoryTable.Allocatable,
}
allocatableMemory = append(allocatableMemory, block)
}
}
return allocatableMemory
}
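
To make the derivation concrete, the sketch below (not part of the commit; the state type names NUMANodeMap, NUMANodeState, and MemoryTable are assumed to match pkg/kubelet/cm/memorymanager/state at this revision) builds a hypothetical single-NUMA-node machine state and applies the same loop:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
)

func main() {
	// Hypothetical machine state: one NUMA node exposing regular memory
	// and 2Mi hugepages, mirroring the fixtures in the server test above.
	machineState := state.NUMANodeMap{
		0: &state.NUMANodeState{
			MemoryMap: map[v1.ResourceName]*state.MemoryTable{
				v1.ResourceMemory:                {Allocatable: 5368709120},
				v1.ResourceName("hugepages-2Mi"): {Allocatable: 1073741824},
			},
		},
	}

	// Same loop as staticPolicy.GetAllocatableMemory: one Block per
	// (NUMA node, resource) pair with a non-zero allocatable amount.
	var allocatableMemory []state.Block
	for numaNodeID, numaNodeState := range machineState {
		for resourceName, memoryTable := range numaNodeState.MemoryMap {
			if memoryTable.Allocatable == 0 {
				continue
			}
			allocatableMemory = append(allocatableMemory, state.Block{
				NUMAAffinity: []int{numaNodeID},
				Type:         resourceName,
				Size:         memoryTable.Allocatable,
			})
		}
	}
	fmt.Printf("%+v\n", allocatableMemory)
}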

View File

@@ -2277,7 +2277,7 @@ func (kl *Kubelet) ListenAndServePodResources() {
klog.V(2).InfoS("Failed to get local endpoint for PodResources endpoint", "err", err)
return
}
-server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager, kl.containerManager)
+server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager, kl.containerManager, kl.containerManager)
}

// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.

View File

@@ -188,10 +188,10 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st
}

// ListenAndServePodResources initializes a gRPC server to serve the PodResources service
-func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider, cpusProvider podresources.CPUsProvider) {
+func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider, cpusProvider podresources.CPUsProvider, memoryProvider podresources.MemoryProvider) {
server := grpc.NewServer()
podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(podsProvider, devicesProvider))
-podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider, cpusProvider))
+podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider, cpusProvider, memoryProvider))
l, err := util.CreateListener(socket)
if err != nil {
klog.ErrorS(err, "Failed to create listener for podResources endpoint")

View File

@@ -86,6 +86,7 @@ var xxx_messageInfo_AllocatableResourcesRequest proto.InternalMessageInfo

type AllocatableResourcesResponse struct {
Devices []*ContainerDevices `protobuf:"bytes,1,rep,name=devices,proto3" json:"devices,omitempty"`
CpuIds []int64 `protobuf:"varint,2,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"`
Memory []*ContainerMemory `protobuf:"bytes,3,rep,name=memory,proto3" json:"memory,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"`
}

@@ -136,6 +137,13 @@ func (m *AllocatableResourcesResponse) GetCpuIds() []int64 {
return nil
}
func (m *AllocatableResourcesResponse) GetMemory() []*ContainerMemory {
if m != nil {
return m.Memory
}
return nil
}
// ListPodResourcesRequest is the request made to the PodResourcesLister service
type ListPodResourcesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`

@@ -287,6 +295,7 @@ type ContainerResources struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"`
CpuIds []int64 `protobuf:"varint,3,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"`
Memory []*ContainerMemory `protobuf:"bytes,4,rep,name=memory,proto3" json:"memory,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"`
}
@@ -344,6 +353,75 @@ func (m *ContainerResources) GetCpuIds() []int64 {
return nil
}
func (m *ContainerResources) GetMemory() []*ContainerMemory {
if m != nil {
return m.Memory
}
return nil
}
// ContainerMemory contains information about memory and hugepages assigned to a container
type ContainerMemory struct {
MemoryType string `protobuf:"bytes,1,opt,name=memory_type,json=memoryType,proto3" json:"memory_type,omitempty"`
Size_ uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
Topology *TopologyInfo `protobuf:"bytes,3,opt,name=topology,proto3" json:"topology,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ContainerMemory) Reset() { *m = ContainerMemory{} }
func (*ContainerMemory) ProtoMessage() {}
func (*ContainerMemory) Descriptor() ([]byte, []int) {
return fileDescriptor_00212fb1f9d3bf1c, []int{6}
}
func (m *ContainerMemory) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ContainerMemory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ContainerMemory.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ContainerMemory) XXX_Merge(src proto.Message) {
xxx_messageInfo_ContainerMemory.Merge(m, src)
}
func (m *ContainerMemory) XXX_Size() int {
return m.Size()
}
func (m *ContainerMemory) XXX_DiscardUnknown() {
xxx_messageInfo_ContainerMemory.DiscardUnknown(m)
}
var xxx_messageInfo_ContainerMemory proto.InternalMessageInfo
func (m *ContainerMemory) GetMemoryType() string {
if m != nil {
return m.MemoryType
}
return ""
}
func (m *ContainerMemory) GetSize_() uint64 {
if m != nil {
return m.Size_
}
return 0
}
func (m *ContainerMemory) GetTopology() *TopologyInfo {
if m != nil {
return m.Topology
}
return nil
}
// ContainerDevices contains information about the devices assigned to a container
type ContainerDevices struct {
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`

@@ -356,7 +434,7 @@ type ContainerDevices struct {
func (m *ContainerDevices) Reset() { *m = ContainerDevices{} }
func (*ContainerDevices) ProtoMessage() {}
func (*ContainerDevices) Descriptor() ([]byte, []int) {
-return fileDescriptor_00212fb1f9d3bf1c, []int{6}
+return fileDescriptor_00212fb1f9d3bf1c, []int{7}
}

func (m *ContainerDevices) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -416,7 +494,7 @@ type TopologyInfo struct {
func (m *TopologyInfo) Reset() { *m = TopologyInfo{} }
func (*TopologyInfo) ProtoMessage() {}
func (*TopologyInfo) Descriptor() ([]byte, []int) {
-return fileDescriptor_00212fb1f9d3bf1c, []int{7}
+return fileDescriptor_00212fb1f9d3bf1c, []int{8}
}

func (m *TopologyInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -462,7 +540,7 @@ type NUMANode struct {
func (m *NUMANode) Reset() { *m = NUMANode{} }
func (*NUMANode) ProtoMessage() {}
func (*NUMANode) Descriptor() ([]byte, []int) {
-return fileDescriptor_00212fb1f9d3bf1c, []int{8}
+return fileDescriptor_00212fb1f9d3bf1c, []int{9}
}

func (m *NUMANode) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -505,6 +583,7 @@ func init() {
proto.RegisterType((*ListPodResourcesResponse)(nil), "v1.ListPodResourcesResponse")
proto.RegisterType((*PodResources)(nil), "v1.PodResources")
proto.RegisterType((*ContainerResources)(nil), "v1.ContainerResources")
proto.RegisterType((*ContainerMemory)(nil), "v1.ContainerMemory")
proto.RegisterType((*ContainerDevices)(nil), "v1.ContainerDevices")
proto.RegisterType((*TopologyInfo)(nil), "v1.TopologyInfo")
proto.RegisterType((*NUMANode)(nil), "v1.NUMANode")
@@ -513,37 +592,41 @@ func init() {

func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) }

var fileDescriptor_00212fb1f9d3bf1c = []byte{
-// 480 bytes of a gzipped FileDescriptorProto
+// 539 bytes of a gzipped FileDescriptorProto
// (regenerated gzipped descriptor bytes elided; the old and new byte columns
// were interleaved by the diff rendering and carry no readable content)
}
// Reference imports to suppress errors if they are not otherwise used.

@@ -705,6 +788,20 @@ func (m *AllocatableResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, e
_ = i
var l int
_ = l
if len(m.Memory) > 0 {
for iNdEx := len(m.Memory) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Memory[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintApi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.CpuIds) > 0 {
dAtA2 := make([]byte, len(m.CpuIds)*10)
var j1 int

@@ -872,6 +969,20 @@ func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.Memory) > 0 {
for iNdEx := len(m.Memory) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Memory[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintApi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
if len(m.CpuIds) > 0 {
dAtA4 := make([]byte, len(m.CpuIds)*10)
var j3 int

@@ -915,6 +1026,53 @@ func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *ContainerMemory) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ContainerMemory) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ContainerMemory) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Topology != nil {
{
size, err := m.Topology.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintApi(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if m.Size_ != 0 {
i = encodeVarintApi(dAtA, i, uint64(m.Size_))
i--
dAtA[i] = 0x10
}
if len(m.MemoryType) > 0 {
i -= len(m.MemoryType)
copy(dAtA[i:], m.MemoryType)
i = encodeVarintApi(dAtA, i, uint64(len(m.MemoryType)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ContainerDevices) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)

@@ -1070,6 +1228,12 @@ func (m *AllocatableResourcesResponse) Size() (n int) {
}
n += 1 + sovApi(uint64(l)) + l
}
if len(m.Memory) > 0 {
for _, e := range m.Memory {
l = e.Size()
n += 1 + l + sovApi(uint64(l))
}
}
return n
}

@@ -1143,6 +1307,32 @@ func (m *ContainerResources) Size() (n int) {
}
n += 1 + sovApi(uint64(l)) + l
}
if len(m.Memory) > 0 {
for _, e := range m.Memory {
l = e.Size()
n += 1 + l + sovApi(uint64(l))
}
}
return n
}
func (m *ContainerMemory) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.MemoryType)
if l > 0 {
n += 1 + l + sovApi(uint64(l))
}
if m.Size_ != 0 {
n += 1 + sovApi(uint64(m.Size_))
}
if m.Topology != nil {
l = m.Topology.Size()
n += 1 + l + sovApi(uint64(l))
}
return n
}

@@ -1220,9 +1410,15 @@ func (this *AllocatableResourcesResponse) String() string {
repeatedStringForDevices += strings.Replace(f.String(), "ContainerDevices", "ContainerDevices", 1) + ","
}
repeatedStringForDevices += "}"
repeatedStringForMemory := "[]*ContainerMemory{"
for _, f := range this.Memory {
repeatedStringForMemory += strings.Replace(f.String(), "ContainerMemory", "ContainerMemory", 1) + ","
}
repeatedStringForMemory += "}"
s := strings.Join([]string{`&AllocatableResourcesResponse{`,
`Devices:` + repeatedStringForDevices + `,`,
`CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`,
`Memory:` + repeatedStringForMemory + `,`,
`}`,
}, "")
return s
@@ -1277,10 +1473,28 @@ func (this *ContainerResources) String() string {
repeatedStringForDevices += strings.Replace(f.String(), "ContainerDevices", "ContainerDevices", 1) + ","
}
repeatedStringForDevices += "}"
repeatedStringForMemory := "[]*ContainerMemory{"
for _, f := range this.Memory {
repeatedStringForMemory += strings.Replace(f.String(), "ContainerMemory", "ContainerMemory", 1) + ","
}
repeatedStringForMemory += "}"
s := strings.Join([]string{`&ContainerResources{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Devices:` + repeatedStringForDevices + `,`,
`CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`,
`Memory:` + repeatedStringForMemory + `,`,
`}`,
}, "")
return s
}
func (this *ContainerMemory) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ContainerMemory{`,
`MemoryType:` + fmt.Sprintf("%v", this.MemoryType) + `,`,
`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
`Topology:` + strings.Replace(this.Topology.String(), "TopologyInfo", "TopologyInfo", 1) + `,`,
`}`,
}, "")
return s
@@ -1519,6 +1733,40 @@ func (m *AllocatableResourcesResponse) Unmarshal(dAtA []byte) error {
} else {
return fmt.Errorf("proto: wrong wireType = %d for field CpuIds", wireType)
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthApi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthApi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Memory = append(m.Memory, &ContainerMemory{})
if err := m.Memory[len(m.Memory)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipApi(dAtA[iNdEx:])

@@ -1993,6 +2241,177 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error {
} else {
return fmt.Errorf("proto: wrong wireType = %d for field CpuIds", wireType)
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthApi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthApi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Memory = append(m.Memory, &ContainerMemory{})
if err := m.Memory[len(m.Memory)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipApi(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthApi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ContainerMemory) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContainerMemory: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContainerMemory: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthApi
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthApi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MemoryType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
}
m.Size_ = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Size_ |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowApi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthApi
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthApi
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Topology == nil {
m.Topology = &TopologyInfo{}
}
if err := m.Topology.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipApi(dAtA[iNdEx:])

View File

@@ -27,6 +27,7 @@ message AllocatableResourcesRequest {}

message AllocatableResourcesResponse {
repeated ContainerDevices devices = 1;
repeated int64 cpu_ids = 2;
repeated ContainerMemory memory = 3;
}

// ListPodResourcesRequest is the request made to the PodResourcesLister service
@@ -49,6 +50,14 @@ message ContainerResources {
string name = 1;
repeated ContainerDevices devices = 2;
repeated int64 cpu_ids = 3;
repeated ContainerMemory memory = 4;
}
// ContainerMemory contains information about memory and hugepages assigned to a container
message ContainerMemory {
string memory_type = 1;
uint64 size = 2;
TopologyInfo topology = 3;
}
// ContainerDevices contains information about the devices assigned to a container // ContainerDevices contains information about the devices assigned to a container

View File

@@ -31,9 +31,12 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -153,8 +156,28 @@ func getMemoryManagerState() (*state.MemoryManagerCheckpoint, error) {
return memoryManagerCheckpoint, nil
}
func getAllocatableMemoryFromStateFile(s *state.MemoryManagerCheckpoint) []state.Block {
var allocatableMemory []state.Block
for numaNodeID, numaNodeState := range s.MachineState {
for resourceName, memoryTable := range numaNodeState.MemoryMap {
if memoryTable.Allocatable == 0 {
continue
}
block := state.Block{
NUMAAffinity: []int{numaNodeID},
Type: resourceName,
Size: memoryTable.Allocatable,
}
allocatableMemory = append(allocatableMemory, block)
}
}
return allocatableMemory
}
type kubeletParams struct {
memoryManagerFeatureGate bool
podResourcesGetAllocatableFeatureGate bool
memoryManagerPolicy string
systemReservedMemory []kubeletconfig.MemoryReservation
systemReserved map[string]string

@@ -169,6 +192,8 @@ func getUpdatedKubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration, params
newCfg.FeatureGates = map[string]bool{}
}
newCfg.FeatureGates["MemoryManager"] = params.memoryManagerFeatureGate
newCfg.FeatureGates["KubeletPodResourcesGetAllocatable"] = params.podResourcesGetAllocatableFeatureGate
newCfg.MemoryManagerPolicy = params.memoryManagerPolicy

// update system-reserved
@@ -257,14 +282,15 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
f := framework.NewDefaultFramework("memory-manager-test")

-memoryQuantatity := resource.MustParse("1100Mi")
+memoryQuantity := resource.MustParse("1100Mi")
defaultKubeParams := &kubeletParams{
memoryManagerFeatureGate: true,
podResourcesGetAllocatableFeatureGate: true,
systemReservedMemory: []kubeletconfig.MemoryReservation{
{
NumaNode: 0,
Limits: v1.ResourceList{
-resourceMemory: memoryQuantatity,
+resourceMemory: memoryQuantity,
},
},
},
@@ -367,12 +393,14 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
}
}

if len(ctnParams) > 0 {
testPod = makeMemoryManagerPod(ctnParams[0].ctnName, initCtnParams, ctnParams)
}
})

ginkgo.JustAfterEach(func() {
// delete the test pod
-if testPod.Name != "" {
+if testPod != nil && testPod.Name != "" {
f.PodClient().DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
@@ -407,6 +435,48 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
initCtnParams = []memoryManagerCtnAttributes{}
})
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should report memory data during request to pod resources GetAllocatableResources", func() {
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
framework.ExpectNoError(err)
defer conn.Close()
resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoError(err)
gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
stateData, err := getMemoryManagerState()
framework.ExpectNoError(err)
stateAllocatableMemory := getAllocatableMemoryFromStateFile(stateData)
framework.ExpectEqual(len(resp.Memory), len(stateAllocatableMemory))
for _, containerMemory := range resp.Memory {
gomega.Expect(containerMemory.Topology).NotTo(gomega.BeNil())
framework.ExpectEqual(len(containerMemory.Topology.Nodes), 1)
gomega.Expect(containerMemory.Topology.Nodes[0]).NotTo(gomega.BeNil())
numaNodeID := int(containerMemory.Topology.Nodes[0].ID)
for _, numaStateMemory := range stateAllocatableMemory {
framework.ExpectEqual(len(numaStateMemory.NUMAAffinity), 1)
if numaNodeID != numaStateMemory.NUMAAffinity[0] {
continue
}
if containerMemory.MemoryType != string(numaStateMemory.Type) {
continue
}
gomega.Expect(containerMemory.Size_).To(gomega.BeEquivalentTo(numaStateMemory.Size))
}
}
gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
})
ginkgo.When("guaranteed pod has init and app containers", func() {
ginkgo.BeforeEach(func() {
// override containers parameters

@@ -499,6 +569,48 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
verifyMemoryPinning(testPod2, []int{0})
})
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func() {
ginkgo.By("Running the test pod and the test pod 2")
testPod = f.PodClient().CreateSync(testPod)
ginkgo.By("Running the test pod 2")
testPod2 = f.PodClient().CreateSync(testPod2)
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
framework.ExpectNoError(err)
defer conn.Close()
resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)
for _, pod := range []*v1.Pod{testPod, testPod2} {
for _, podResource := range resp.PodResources {
if podResource.Name != pod.Name {
continue
}
for _, c := range pod.Spec.Containers {
for _, containerResource := range podResource.Containers {
if containerResource.Name != c.Name {
continue
}
for _, containerMemory := range containerResource.Memory {
q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)]
value, ok := q.AsInt64()
gomega.Expect(ok).To(gomega.BeTrue())
gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_))
}
}
}
}
}
})
ginkgo.JustAfterEach(func() {
// delete the test pod 2
if testPod2.Name != "" {

@@ -604,6 +716,46 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager][NodeAlphaFe
}
})
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should not report any memory data during request to pod resources GetAllocatableResources", func() {
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
framework.ExpectNoError(err)
defer conn.Close()
resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoError(err)
gomega.Expect(resp.Memory).To(gomega.BeEmpty())
})
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should not report any memory data during request to pod resources List", func() {
testPod = f.PodClient().CreateSync(testPod)
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
framework.ExpectNoError(err)
defer conn.Close()
resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)
for _, podResource := range resp.PodResources {
if podResource.Name != testPod.Name {
continue
}
for _, containerResource := range podResource.Containers {
gomega.Expect(containerResource.Memory).To(gomega.BeEmpty())
}
}
})
ginkgo.It("should succeed to start the pod", func() {
testPod = f.PodClient().CreateSync(testPod)