Merge pull request #99640 from cynepco3hahue/handling_init_containers
memory manager: handling init containers
This commit is contained in: commit f50763754a
pkg/kubelet/cm/memorymanager/memory_manager.go

@@ -193,6 +193,19 @@ func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID
 	defer m.Unlock()

 	m.containerMap.Add(string(pod.UID), container.Name, containerID)

+	// Since we know that each init container always runs to completion before
+	// the next container starts, we can safely remove references to any previously
+	// started init containers. This will free up the memory from these init containers
+	// for use in other pods. If the current container happens to be an init container,
+	// we skip deletion of it until the next container is added, and this is called again.
+	for _, initContainer := range pod.Spec.InitContainers {
+		if initContainer.Name == container.Name {
+			break
+		}
+
+		m.policyRemoveContainerByRef(string(pod.UID), initContainer.Name)
+	}
 }

 // GetMemoryNUMANodes provides NUMA nodes that used to allocate the container memory
@@ -243,11 +256,7 @@ func (m *manager) RemoveContainer(containerID string) error {
 		return nil
 	}

-	err = m.policyRemoveContainerByRef(podUID, containerName)
-	if err != nil {
-		klog.ErrorS(err, "RemoveContainer error")
-		return err
-	}
+	m.policyRemoveContainerByRef(podUID, containerName)

 	return nil
 }
@@ -308,22 +317,15 @@ func (m *manager) removeStaleState() {
 		for containerName := range assignments[podUID] {
 			if _, ok := activeContainers[podUID][containerName]; !ok {
 				klog.InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
-				err := m.policyRemoveContainerByRef(podUID, containerName)
-				if err != nil {
-					klog.ErrorS(err, "RemoveStaleState: failed to remove state", "podUID", podUID, "containerName", containerName)
-				}
+				m.policyRemoveContainerByRef(podUID, containerName)
			}
		}
	}
 }

-func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) error {
-	err := m.policy.RemoveContainer(m.state, podUID, containerName)
-	if err == nil {
-		m.containerMap.RemoveByContainerRef(podUID, containerName)
-	}
-
-	return err
+func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) {
+	m.policy.RemoveContainer(m.state, podUID, containerName)
+	m.containerMap.RemoveByContainerRef(podUID, containerName)
 }

 func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory []kubeletconfig.MemoryReservation) (map[v1.ResourceName]resource.Quantity, error) {
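Illustration only, not part of the change: the comment added to AddContainer describes a simple rule — when a container is added, references to any init containers that appear before it in pod.Spec.InitContainers are stale and may be dropped. The following standalone sketch applies that rule to a hypothetical pod; staleInitContainers and the container names init-a, init-b, app are made up for the example.

package main

import "fmt"

// staleInitContainers returns the init containers whose state references can be
// dropped once "current" is being added, mirroring the loop added to AddContainer:
// everything before the current container in the init list is stale.
func staleInitContainers(initContainers []string, current string) []string {
	var stale []string
	for _, name := range initContainers {
		// Stop at the container being added; it and anything after it are kept.
		if name == current {
			break
		}
		stale = append(stale, name)
	}
	return stale
}

func main() {
	inits := []string{"init-a", "init-b"}
	fmt.Println(staleInitContainers(inits, "init-b")) // [init-a]
	fmt.Println(staleInitContainers(inits, "app"))    // [init-a init-b]: an app container is not in the init list, so all inits are stale
}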
pkg/kubelet/cm/memorymanager/memory_manager_test.go

@@ -24,15 +24,17 @@ import (
 	"strings"
 	"testing"

-	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
+	"k8s.io/klog/v2"

 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	"github.com/stretchr/testify/assert"

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@@ -100,8 +102,7 @@ func (p *mockPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Containe
 	return p.err
 }

-func (p *mockPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
-	return p.err
+func (p *mockPolicy) RemoveContainer(s state.State, podUID string, containerName string) {
 }

 func (p *mockPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
@@ -150,6 +151,18 @@ func getPod(podUID string, containerName string, requirements *v1.ResourceRequir
 	}
 }

+func getPodWithInitContainers(podUID string, containers []v1.Container, initContainers []v1.Container) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			UID: types.UID(podUID),
+		},
+		Spec: v1.PodSpec{
+			InitContainers: initContainers,
+			Containers:     containers,
+		},
+	}
+}
+
 func TestValidateReservedMemory(t *testing.T) {
 	machineInfo := &cadvisorapi.MachineInfo{
 		Topology: []cadvisorapi.Node{
@@ -1695,154 +1708,6 @@ func TestRemoveContainer(t *testing.T) {
 			},
 			expectedError: nil,
 		},
-		{
-			description:       "Should fail if policy returns an error",
-			removeContainerID: "fakeID1",
-			policyName:        policyTypeMock,
-			machineInfo:       machineInfo,
-			reserved:          reserved,
-			assignments: state.ContainerMemoryAssignments{
-				"fakePod1": map[string][]state.Block{
-					"fakeContainer1": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-					"fakeContainer2": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-				},
-			},
-			expectedAssignments: state.ContainerMemoryAssignments{
-				"fakePod1": map[string][]state.Block{
-					"fakeContainer1": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-					"fakeContainer2": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-				},
-			},
-			machineState: state.NUMANodeMap{
-				0: &state.NUMANodeState{
-					Cells:               []int{0},
-					NumberOfAssignments: 4,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           7 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           3 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 0 * gb,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-				1: &state.NUMANodeState{
-					Cells:               []int{1},
-					NumberOfAssignments: 0,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           9 * gb,
-							Reserved:       0 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           5 * gb,
-							Reserved:       0,
-							SystemReserved: 0,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-			},
-			expectedMachineState: state.NUMANodeMap{
-				0: &state.NUMANodeState{
-					Cells:               []int{0},
-					NumberOfAssignments: 4,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           7 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           3 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 0 * gb,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-				1: &state.NUMANodeState{
-					Cells:               []int{1},
-					NumberOfAssignments: 0,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           9 * gb,
-							Reserved:       0 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           5 * gb,
-							Reserved:       0,
-							SystemReserved: 0,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-			},
-			expectedError: fmt.Errorf("fake reg error"),
-		},
 		{
 			description:       "Should do nothing if container is not in containerMap",
 			removeContainerID: "fakeID3",
@@ -2295,7 +2160,211 @@ func TestGetTopologyHints(t *testing.T) {
 			}
 		})
 	}
 }

+func TestAllocateAndAddPodWithInitContainers(t *testing.T) {
+	testCases := []testMemoryManager{
+		{
+			description: "should remove init containers from the state file, once app container started",
+			policyName:  policyTypeStatic,
+			machineInfo: returnMachineInfo(),
+			assignments: state.ContainerMemoryAssignments{},
+			expectedAssignments: state.ContainerMemoryAssignments{
+				"pod1": map[string][]state.Block{
+					"container1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         4 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         4 * gb,
+						},
+					},
+				},
+			},
+			machineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    9728 * mb,
+							Free:           9728 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   10 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    5 * gb,
+							Free:           5 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   5 * gb,
+						},
+					},
+					Cells: []int{0},
+				},
+				1: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    9728 * mb,
+							Free:           9728 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   10 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    5 * gb,
+							Free:           5 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   5 * gb,
+						},
+					},
+					Cells: []int{1},
+				},
+			},
+			expectedMachineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    9728 * mb,
+							Free:           5632 * mb,
+							Reserved:       4 * gb,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   10 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    5 * gb,
+							Free:           gb,
+							Reserved:       4 * gb,
+							SystemReserved: 0,
+							TotalMemSize:   5 * gb,
+						},
+					},
+					Cells:               []int{0},
+					NumberOfAssignments: 2,
+				},
+				1: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    9728 * mb,
+							Free:           9728 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   10 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    5 * gb,
+							Free:           5 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   5 * gb,
+						},
+					},
+					Cells: []int{1},
+				},
+			},
+			reserved: systemReservedMemory{
+				0: map[v1.ResourceName]uint64{
+					v1.ResourceMemory: 512 * mb,
+				},
+			},
+			podAllocate: getPodWithInitContainers(
+				"pod1",
+				[]v1.Container{
+					{
+						Name: "container1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+						},
+					},
+				},
+				[]v1.Container{
+					{
+						Name: "initContainer1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("7Gi"),
+								hugepages1Gi:      resource.MustParse("5Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("7Gi"),
+								hugepages1Gi:      resource.MustParse("5Gi"),
+							},
+						},
+					},
+				},
+			),
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.description, func(t *testing.T) {
+			klog.InfoS("TestAllocateAndAddPodWithInitContainers", "test name", testCase.description)
+			mgr := &manager{
+				policy:       returnPolicyByName(testCase),
+				state:        state.NewMemoryState(),
+				containerMap: containermap.NewContainerMap(),
+				containerRuntime: mockRuntimeService{
+					err: nil,
+				},
+				activePods:        func() []*v1.Pod { return []*v1.Pod{testCase.podAllocate} },
+				podStatusProvider: mockPodStatusProvider{},
+			}
+			mgr.sourcesReady = &sourcesReadyStub{}
+			mgr.state.SetMachineState(testCase.machineState.Clone())
+			mgr.state.SetMemoryAssignments(testCase.assignments.Clone())
+
+			// Allocates memory for init containers
+			for i := range testCase.podAllocate.Spec.InitContainers {
+				err := mgr.Allocate(testCase.podAllocate, &testCase.podAllocate.Spec.InitContainers[i])
+				if !reflect.DeepEqual(err, testCase.expectedError) {
+					t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
+				}
+			}
+
+			// Allocates memory for apps containers
+			for i := range testCase.podAllocate.Spec.Containers {
+				err := mgr.Allocate(testCase.podAllocate, &testCase.podAllocate.Spec.Containers[i])
+				if !reflect.DeepEqual(err, testCase.expectedError) {
+					t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
+				}
+			}
+
+			// Calls AddContainer for init containers
+			for i, initContainer := range testCase.podAllocate.Spec.InitContainers {
+				mgr.AddContainer(testCase.podAllocate, &testCase.podAllocate.Spec.InitContainers[i], initContainer.Name)
+			}
+
+			// Calls AddContainer for apps containers
+			for i, appContainer := range testCase.podAllocate.Spec.Containers {
+				mgr.AddContainer(testCase.podAllocate, &testCase.podAllocate.Spec.Containers[i], appContainer.Name)
+			}
+
+			assignments := mgr.state.GetMemoryAssignments()
+			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
+				t.Fatalf("Actual assignments %v are different from the expected %v", assignments, testCase.expectedAssignments)
+			}
+
+			machineState := mgr.state.GetMachineState()
+			if !areMachineStatesEqual(machineState, testCase.expectedMachineState) {
+				t.Fatalf("The actual machine state %v is different from the expected %v", machineState, testCase.expectedMachineState)
+			}
+		})
+	}
+}
+
 func returnMachineInfo() cadvisorapi.MachineInfo {
pkg/kubelet/cm/memorymanager/policy.go

@@ -32,7 +32,7 @@ type Policy interface {
 	// Allocate call is idempotent
 	Allocate(s state.State, pod *v1.Pod, container *v1.Container) error
 	// RemoveContainer call is idempotent
-	RemoveContainer(s state.State, podUID string, containerName string) error
+	RemoveContainer(s state.State, podUID string, containerName string)
 	// GetTopologyHints implements the topologymanager.HintProvider Interface
 	// and is consulted to achieve NUMA aware resource alignment among this
 	// and other resource controllers.
pkg/kubelet/cm/memorymanager/policy_none.go

@@ -49,8 +49,7 @@ func (p *none) Allocate(s state.State, pod *v1.Pod, container *v1.Container) err
 }

 // RemoveContainer call is idempotent
-func (p *none) RemoveContainer(s state.State, podUID string, containerName string) error {
-	return nil
+func (p *none) RemoveContainer(s state.State, podUID string, containerName string) {
 }

 // GetTopologyHints implements the topologymanager.HintProvider Interface
pkg/kubelet/cm/memorymanager/policy_static.go

@@ -36,6 +36,7 @@ import (
 const policyTypeStatic policyType = "Static"

 type systemReservedMemory map[int]map[v1.ResourceName]uint64
+type reusableMemory map[string]map[string]map[v1.ResourceName]uint64

 // staticPolicy is implementation of the policy interface for the static policy
 type staticPolicy struct {
@@ -45,6 +46,8 @@ type staticPolicy struct {
 	systemReserved systemReservedMemory
 	// topology manager reference to get container Topology affinity
 	affinity topologymanager.Store
+	// initContainersReusableMemory contains the memory allocated for init containers that can be reused
+	initContainersReusableMemory reusableMemory
 }

 var _ Policy = &staticPolicy{}
@@ -65,9 +68,10 @@ func NewPolicyStatic(machineInfo *cadvisorapi.MachineInfo, reserved systemReserv
 	}

 	return &staticPolicy{
 		machineInfo:    machineInfo,
 		systemReserved: reserved,
 		affinity:       affinity,
+		initContainersReusableMemory: reusableMemory{},
 	}, nil
 }

@@ -90,14 +94,17 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 		return nil
 	}

+	podUID := string(pod.UID)
 	klog.InfoS("Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
-	if blocks := s.GetMemoryBlocks(string(pod.UID), container.Name); blocks != nil {
+	if blocks := s.GetMemoryBlocks(podUID, container.Name); blocks != nil {
+		p.updatePodReusableMemory(pod, container, blocks)
+
 		klog.InfoS("Container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name)
 		return nil
 	}

 	// Call Topology Manager to get the aligned affinity across all hint providers.
-	hint := p.affinity.GetAffinity(string(pod.UID), container.Name)
+	hint := p.affinity.GetAffinity(podUID, container.Name)
 	klog.InfoS("Got topology affinity", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "hint", hint)

 	requestedResources, err := getRequestedResources(container)
@@ -105,11 +112,12 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 		return err
 	}

+	machineState := s.GetMachineState()
 	bestHint := &hint
 	// topology manager returned the hint with NUMA affinity nil
 	// we should use the default NUMA affinity calculated the same way as for the topology manager
 	if hint.NUMANodeAffinity == nil {
-		defaultHint, err := p.getDefaultHint(s, requestedResources)
+		defaultHint, err := p.getDefaultHint(machineState, pod, requestedResources)
 		if err != nil {
 			return err
 		}
@@ -120,12 +128,10 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 		bestHint = defaultHint
 	}

-	machineState := s.GetMachineState()
-
 	// topology manager returns the hint that does not satisfy completely the container request
 	// we should extend this hint to the one who will satisfy the request and include the current hint
 	if !isAffinitySatisfyRequest(machineState, bestHint.NUMANodeAffinity, requestedResources) {
-		extendedHint, err := p.extendTopologyManagerHint(s, requestedResources, bestHint.NUMANodeAffinity)
+		extendedHint, err := p.extendTopologyManagerHint(machineState, pod, requestedResources, bestHint.NUMANodeAffinity)
 		if err != nil {
 			return err
 		}
@@ -146,51 +152,86 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 			Type:         resourceName,
 		})

-		// Update nodes memory state
-		for _, nodeID := range maskBits {
-			machineState[nodeID].NumberOfAssignments++
-			machineState[nodeID].Cells = maskBits
-
-			// we need to continue to update all affinity mask nodes
-			if requestedSize == 0 {
-				continue
-			}
-
-			// update the node memory state
-			nodeResourceMemoryState := machineState[nodeID].MemoryMap[resourceName]
-			if nodeResourceMemoryState.Free <= 0 {
-				continue
-			}
-
-			// the node has enough memory to satisfy the request
-			if nodeResourceMemoryState.Free >= requestedSize {
-				nodeResourceMemoryState.Reserved += requestedSize
-				nodeResourceMemoryState.Free -= requestedSize
-				requestedSize = 0
-				continue
-			}
-
-			// the node does not have enough memory, use the node remaining memory and move to the next node
-			requestedSize -= nodeResourceMemoryState.Free
-			nodeResourceMemoryState.Reserved += nodeResourceMemoryState.Free
-			nodeResourceMemoryState.Free = 0
+		podReusableMemory := p.getPodReusableMemory(pod, bestHint.NUMANodeAffinity, resourceName)
+		if podReusableMemory >= requestedSize {
+			requestedSize = 0
+		} else {
+			requestedSize -= podReusableMemory
 		}
+
+		// Update nodes memory state
+		p.updateMachineState(machineState, maskBits, resourceName, requestedSize)
 	}

+	p.updatePodReusableMemory(pod, container, containerBlocks)
+
 	s.SetMachineState(machineState)
-	s.SetMemoryBlocks(string(pod.UID), container.Name, containerBlocks)
+	s.SetMemoryBlocks(podUID, container.Name, containerBlocks)
+
+	// update init containers memory blocks to reflect the fact that we re-used init containers memory
+	// it is possible that the size of the init container memory block will have 0 value, when all memory
+	// allocated for it was re-used
+	// we only do this so that the sum(memory_for_all_containers) == total amount of allocated memory to the pod, even
+	// though the final state here doesn't accurately reflect what was (in reality) allocated to each container
+	// TODO: we should refactor our state structs to reflect the amount of the re-used memory
+	p.updateInitContainersMemoryBlocks(s, pod, container, containerBlocks)
+
 	return nil
 }

-// RemoveContainer call is idempotent
-func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
-	klog.InfoS("RemoveContainer", "podUID", podUID, "containerName", containerName)
-	blocks := s.GetMemoryBlocks(podUID, containerName)
-	if blocks == nil {
-		return nil
-	}
+func (p *staticPolicy) updateMachineState(machineState state.NUMANodeMap, numaAffinity []int, resourceName v1.ResourceName, requestedSize uint64) {
+	for _, nodeID := range numaAffinity {
+		machineState[nodeID].NumberOfAssignments++
+		machineState[nodeID].Cells = numaAffinity
+
+		// we need to continue to update all affinity mask nodes
+		if requestedSize == 0 {
+			continue
+		}
+
+		// update the node memory state
+		nodeResourceMemoryState := machineState[nodeID].MemoryMap[resourceName]
+		if nodeResourceMemoryState.Free <= 0 {
+			continue
+		}
+
+		// the node has enough memory to satisfy the request
+		if nodeResourceMemoryState.Free >= requestedSize {
+			nodeResourceMemoryState.Reserved += requestedSize
+			nodeResourceMemoryState.Free -= requestedSize
+			requestedSize = 0
+			continue
+		}
+
+		// the node does not have enough memory, use the node remaining memory and move to the next node
+		requestedSize -= nodeResourceMemoryState.Free
+		nodeResourceMemoryState.Reserved += nodeResourceMemoryState.Free
+		nodeResourceMemoryState.Free = 0
+	}
+}
+
+func (p *staticPolicy) getPodReusableMemory(pod *v1.Pod, numaAffinity bitmask.BitMask, resourceName v1.ResourceName) uint64 {
+	podReusableMemory, ok := p.initContainersReusableMemory[string(pod.UID)]
+	if !ok {
+		return 0
+	}
+
+	numaReusableMemory, ok := podReusableMemory[numaAffinity.String()]
+	if !ok {
+		return 0
+	}
+
+	return numaReusableMemory[resourceName]
+}
+
+// RemoveContainer call is idempotent
+func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) {
+	blocks := s.GetMemoryBlocks(podUID, containerName)
+	if blocks == nil {
+		return
+	}
+
+	klog.InfoS("RemoveContainer", "podUID", podUID, "containerName", containerName)
 	s.Delete(podUID, containerName)

 	// Mutate machine memory state to update free and reserved memory
@@ -234,8 +275,6 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
 	}

 	s.SetMachineState(machineState)
-
-	return nil
 }

 func regenerateHints(pod *v1.Pod, ctn *v1.Container, ctnBlocks []state.Block, reqRsrc map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
@@ -339,7 +378,9 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 			return regenerateHints(pod, &ctn, containerBlocks, reqRsrcs)
 		}
 	}
-	return p.calculateHints(s, reqRsrcs)
+
+	// the pod topology hints calculated only once for all containers, so no need to pass re-usable state
+	return p.calculateHints(s.GetMachineState(), pod, reqRsrcs)
 }

 // GetTopologyHints implements the topologymanager.HintProvider Interface
@@ -364,7 +405,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
 		return regenerateHints(pod, container, containerBlocks, requestedResources)
 	}

-	return p.calculateHints(s, requestedResources)
+	return p.calculateHints(s.GetMachineState(), pod, requestedResources)
 }

 func getRequestedResources(container *v1.Container) (map[v1.ResourceName]uint64, error) {
@@ -382,8 +423,7 @@ func getRequestedResources(container *v1.Container) (map[v1.ResourceName]uint64,
 	return requestedResources, nil
 }

-func (p *staticPolicy) calculateHints(s state.State, requestedResources map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
-	machineState := s.GetMachineState()
+func (p *staticPolicy) calculateHints(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
 	var numaNodes []int
 	for n := range machineState {
 		numaNodes = append(numaNodes, n)
@@ -447,7 +487,8 @@ func (p *staticPolicy) calculateHints(machineState state.NUMANodeMap, pod *v1.Po

 		// verify that for all memory types the node mask has enough free resources
 		for resourceName, requestedSize := range requestedResources {
-			if totalFreeSize[resourceName] < requestedSize {
+			podReusableMemory := p.getPodReusableMemory(pod, mask, resourceName)
+			if totalFreeSize[resourceName]+podReusableMemory < requestedSize {
 				return
 			}
 		}
@@ -671,8 +712,8 @@ func (p *staticPolicy) getResourceSystemReserved(nodeID int, resourceName v1.Res
 	return systemReserved
 }

-func (p *staticPolicy) getDefaultHint(s state.State, requestedResources map[v1.ResourceName]uint64) (*topologymanager.TopologyHint, error) {
-	hints := p.calculateHints(s, requestedResources)
+func (p *staticPolicy) getDefaultHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) (*topologymanager.TopologyHint, error) {
+	hints := p.calculateHints(machineState, pod, requestedResources)
 	if len(hints) < 1 {
 		return nil, fmt.Errorf("[memorymanager] failed to get the default NUMA affinity, no NUMA nodes with enough memory is available")
 	}
@@ -706,8 +747,8 @@ func isAffinitySatisfyRequest(machineState state.NUMANodeMap, mask bitmask.BitMa
 // the topology manager uses bitwise AND to merge all topology hints into the best one, so in case of the restricted policy,
 // it possible that we will get the subset of hint that we provided to the topology manager, in this case we want to extend
 // it to the original one
-func (p *staticPolicy) extendTopologyManagerHint(s state.State, requestedResources map[v1.ResourceName]uint64, mask bitmask.BitMask) (*topologymanager.TopologyHint, error) {
-	hints := p.calculateHints(s, requestedResources)
+func (p *staticPolicy) extendTopologyManagerHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64, mask bitmask.BitMask) (*topologymanager.TopologyHint, error) {
+	hints := p.calculateHints(machineState, pod, requestedResources)

 	var filteredHints []topologymanager.TopologyHint
 	// hints for all memory types should be the same, so we will check hints only for regular memory type
@@ -742,7 +783,8 @@ func isHintInGroup(hint []int, group []int) bool {
 		}
 		hintIndex++
 	}
-	return false
+
+	return hintIndex == len(hint)
 }

 func findBestHint(hints []topologymanager.TopologyHint) *topologymanager.TopologyHint {
@@ -788,3 +830,121 @@ func (p *staticPolicy) GetAllocatableMemory(s state.State) []state.Block {
 	}
 	return allocatableMemory
 }
+
+func (p *staticPolicy) updatePodReusableMemory(pod *v1.Pod, container *v1.Container, memoryBlocks []state.Block) {
+	podUID := string(pod.UID)
+
+	// If pod entries to m.initContainersReusableMemory other than the current pod exist, delete them.
+	for uid := range p.initContainersReusableMemory {
+		if podUID != uid {
+			delete(p.initContainersReusableMemory, podUID)
+		}
+	}
+
+	if isInitContainer(pod, container) {
+		if _, ok := p.initContainersReusableMemory[podUID]; !ok {
+			p.initContainersReusableMemory[podUID] = map[string]map[v1.ResourceName]uint64{}
+		}
+
+		for _, block := range memoryBlocks {
+			blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
+			blockBitMaskString := blockBitMask.String()
+
+			if _, ok := p.initContainersReusableMemory[podUID][blockBitMaskString]; !ok {
+				p.initContainersReusableMemory[podUID][blockBitMaskString] = map[v1.ResourceName]uint64{}
+			}
+
+			if blockReusableMemory := p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type]; block.Size > blockReusableMemory {
+				p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type] = block.Size
+			}
+		}
+
+		return
+	}
+
+	// update re-usable memory once it used by the app container
+	for _, block := range memoryBlocks {
+		blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
+		if podReusableMemory := p.getPodReusableMemory(pod, blockBitMask, block.Type); podReusableMemory != 0 {
+			if block.Size >= podReusableMemory {
+				p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] = 0
+			} else {
+				p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] -= block.Size
+			}
+		}
+	}
+}
+
+func (p *staticPolicy) updateInitContainersMemoryBlocks(s state.State, pod *v1.Pod, container *v1.Container, containerMemoryBlocks []state.Block) {
+	podUID := string(pod.UID)
+
+	for _, containerBlock := range containerMemoryBlocks {
+		blockSize := containerBlock.Size
+		for _, initContainer := range pod.Spec.InitContainers {
+			// we do not want to continue updates once we reach the current container
+			if initContainer.Name == container.Name {
+				break
+			}
+
+			if blockSize == 0 {
+				break
+			}
+
+			initContainerBlocks := s.GetMemoryBlocks(podUID, initContainer.Name)
+			if len(initContainerBlocks) == 0 {
+				continue
+			}
+
+			for i := range initContainerBlocks {
+				initContainerBlock := &initContainerBlocks[i]
+				if initContainerBlock.Size == 0 {
+					continue
+				}
+
+				if initContainerBlock.Type != containerBlock.Type {
+					continue
+				}
+
+				if !isNUMAAffinitiesEqual(initContainerBlock.NUMAAffinity, containerBlock.NUMAAffinity) {
+					continue
+				}
+
+				if initContainerBlock.Size > blockSize {
+					initContainerBlock.Size -= blockSize
+					blockSize = 0
+				} else {
+					blockSize -= initContainerBlock.Size
+					initContainerBlock.Size = 0
+				}
+			}
+
+			s.SetMemoryBlocks(podUID, initContainer.Name, initContainerBlocks)
+		}
+	}
+}
+
+func isInitContainer(pod *v1.Pod, container *v1.Container) bool {
+	for _, initContainer := range pod.Spec.InitContainers {
+		if initContainer.Name == container.Name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func isNUMAAffinitiesEqual(numaAffinity1, numaAffinity2 []int) bool {
+	bitMask1, err := bitmask.NewBitMask(numaAffinity1...)
+	if err != nil {
+		klog.ErrorS(err, "failed to create bit mask", numaAffinity1)
+		return false
+	}
+
+	bitMask2, err := bitmask.NewBitMask(numaAffinity2...)
+	if err != nil {
+		klog.ErrorS(err, "failed to create bit mask", numaAffinity2)
+		return false
+	}
+
+	return bitMask1.IsEqual(bitMask2)
+}
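Illustration only, not part of the change: ignoring NUMA masks and the per-block bookkeeping above, the re-use of init container memory means a pod's peak reservation per resource behaves like max(largest init container request, sum of app container requests) instead of the sum of all requests. The sketch below uses a made-up helper, podFootprint, to show that simplified arithmetic; the numbers match the static policy test cases that follow.

package main

import "fmt"

// podFootprint models the peak per-resource reservation for a pod whose init
// container memory can be re-used by later containers: the largest single init
// request versus the sum of all app container requests, whichever is bigger.
func podFootprint(initRequests, appRequests []uint64) uint64 {
	var maxInit, sumApps uint64
	for _, r := range initRequests {
		if r > maxInit {
			maxInit = r
		}
	}
	for _, r := range appRequests {
		sumApps += r
	}
	if maxInit > sumApps {
		return maxInit
	}
	return sumApps
}

func main() {
	const gb = 1024 * 1024 * 1024
	// Init containers 1Gi and 2Gi, app containers 3Gi and 4Gi -> 7Gi reserved.
	fmt.Println(podFootprint([]uint64{1 * gb, 2 * gb}, []uint64{3 * gb, 4 * gb}) / gb) // 7
	// Init containers 7Gi and 4Gi, app containers 4Gi and 3Gi -> still 7Gi reserved.
	fmt.Println(podFootprint([]uint64{7 * gb, 4 * gb}, []uint64{4 * gb, 3 * gb}) / gb) // 7
}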
pkg/kubelet/cm/memorymanager/policy_static_test.go

@@ -21,6 +21,8 @@ import (
 	"reflect"
 	"testing"

+	"k8s.io/klog/v2"
+
 	cadvisorapi "github.com/google/cadvisor/info/v1"

 	v1 "k8s.io/api/core/v1"
@@ -1788,6 +1790,561 @@ func TestStaticPolicyAllocate(t *testing.T) {
 	}
 }

+func TestStaticPolicyAllocateWithInitContainers(t *testing.T) {
+	testCases := []testStaticPolicy{
+		{
+			description: "should re-use init containers memory, init containers requests 1Gi and 2Gi, apps containers 3Gi and 4Gi",
+			assignments: state.ContainerMemoryAssignments{},
+			expectedAssignments: state.ContainerMemoryAssignments{
+				"pod1": map[string][]state.Block{
+					"initContainer1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         0,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         0,
+						},
+					},
+					"initContainer2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         0,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         0,
+						},
+					},
+					"container1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         3 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         3 * gb,
+						},
+					},
+					"container2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         4 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         4 * gb,
+						},
+					},
+				},
+			},
+			machineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           7680 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           8 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells: []int{0},
+				},
+			},
+			expectedMachineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           512 * mb,
+							Reserved:       7 * gb,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           1 * gb,
+							Reserved:       7 * gb,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells:               []int{0},
+					NumberOfAssignments: 8,
+				},
+			},
+			systemReserved: systemReservedMemory{
+				0: map[v1.ResourceName]uint64{
+					v1.ResourceMemory: 512 * mb,
+				},
+			},
+			pod: getPodWithInitContainers(
+				"pod1",
+				[]v1.Container{
+					{
+						Name: "container1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+						},
+					},
+					{
+						Name: "container2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+						},
+					},
+				},
+				[]v1.Container{
+					{
+						Name: "initContainer1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("1Gi"),
+								hugepages1Gi:      resource.MustParse("1Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("1Gi"),
+								hugepages1Gi:      resource.MustParse("1Gi"),
+							},
+						},
+					},
+					{
+						Name: "initContainer2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("2Gi"),
+								hugepages1Gi:      resource.MustParse("2Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("2Gi"),
+								hugepages1Gi:      resource.MustParse("2Gi"),
+							},
+						},
+					},
+				},
+			),
+			topologyHint: &topologymanager.TopologyHint{},
+		},
+		{
+			description: "should re-use init containers memory, init containers requests 4Gi and 3Gi, apps containers 2Gi and 1Gi",
+			assignments: state.ContainerMemoryAssignments{},
+			expectedAssignments: state.ContainerMemoryAssignments{
+				"pod1": map[string][]state.Block{
+					"initContainer1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         0,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         0,
+						},
+					},
+					"initContainer2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         gb,
+						},
+					},
+					"container1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         2 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         2 * gb,
+						},
+					},
+					"container2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         gb,
+						},
+					},
+				},
+			},
+			machineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           7680 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           8 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells: []int{0},
+				},
+			},
+			expectedMachineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           3584 * mb,
+							Reserved:       4 * gb,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           4 * gb,
+							Reserved:       4 * gb,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells:               []int{0},
+					NumberOfAssignments: 8,
+				},
+			},
+			systemReserved: systemReservedMemory{
+				0: map[v1.ResourceName]uint64{
+					v1.ResourceMemory: 512 * mb,
+				},
+			},
+			pod: getPodWithInitContainers(
+				"pod1",
+				[]v1.Container{
+					{
+						Name: "container1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("2Gi"),
+								hugepages1Gi:      resource.MustParse("2Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("2Gi"),
+								hugepages1Gi:      resource.MustParse("2Gi"),
+							},
+						},
+					},
+					{
+						Name: "container2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("1Gi"),
+								hugepages1Gi:      resource.MustParse("1Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("1Gi"),
+								hugepages1Gi:      resource.MustParse("1Gi"),
+							},
+						},
+					},
+				},
+				[]v1.Container{
+					{
+						Name: "initContainer1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+						},
+					},
+					{
+						Name: "initContainer2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+						},
+					},
+				},
+			),
+			topologyHint: &topologymanager.TopologyHint{},
+		},
+		{
+			description: "should re-use init containers memory, init containers requests 7Gi and 4Gi, apps containers 4Gi and 3Gi",
+			assignments: state.ContainerMemoryAssignments{},
+			expectedAssignments: state.ContainerMemoryAssignments{
+				"pod1": map[string][]state.Block{
+					"initContainer1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         0,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         0,
+						},
+					},
+					"initContainer2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         0,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         0,
+						},
+					},
+					"container1": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         4 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         4 * gb,
+						},
+					},
+					"container2": {
+						{
+							NUMAAffinity: []int{0},
+							Type:         v1.ResourceMemory,
+							Size:         3 * gb,
+						},
+						{
+							NUMAAffinity: []int{0},
+							Type:         hugepages1Gi,
+							Size:         3 * gb,
+						},
+					},
+				},
+			},
+			machineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           7680 * mb,
+							Reserved:       0,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           8 * gb,
+							Reserved:       0,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells: []int{0},
+				},
+			},
+			expectedMachineState: state.NUMANodeMap{
+				0: &state.NUMANodeState{
+					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
+						v1.ResourceMemory: {
+							Allocatable:    7680 * mb,
+							Free:           512 * mb,
+							Reserved:       7 * gb,
+							SystemReserved: 512 * mb,
+							TotalMemSize:   8 * gb,
+						},
+						hugepages1Gi: {
+							Allocatable:    8 * gb,
+							Free:           1 * gb,
+							Reserved:       7 * gb,
+							SystemReserved: 0,
+							TotalMemSize:   8 * gb,
+						},
+					},
+					Cells:               []int{0},
+					NumberOfAssignments: 8,
+				},
+			},
+			systemReserved: systemReservedMemory{
+				0: map[v1.ResourceName]uint64{
+					v1.ResourceMemory: 512 * mb,
+				},
+			},
+			pod: getPodWithInitContainers(
+				"pod1",
+				[]v1.Container{
+					{
+						Name: "container1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+						},
+					},
+					{
+						Name: "container2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("3Gi"),
+								hugepages1Gi:      resource.MustParse("3Gi"),
+							},
+						},
+					},
+				},
+				[]v1.Container{
+					{
+						Name: "initContainer1",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("7Gi"),
+								hugepages1Gi:      resource.MustParse("7Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("7Gi"),
+								hugepages1Gi:      resource.MustParse("7Gi"),
+							},
+						},
+					},
+					{
+						Name: "initContainer2",
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("1000Mi"),
+								v1.ResourceMemory: resource.MustParse("4Gi"),
+								hugepages1Gi:      resource.MustParse("4Gi"),
+							},
+						},
+					},
+				},
+			),
+			topologyHint: &topologymanager.TopologyHint{},
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.description, func(t *testing.T) {
+			klog.InfoS("TestStaticPolicyAllocateWithInitContainers", "test name", testCase.description)
+			p, s, err := initTests(t, &testCase, testCase.topologyHint)
+			if err != nil {
+				t.Fatalf("Unexpected error: %v", err)
+			}
+
+			for i := range testCase.pod.Spec.InitContainers {
+				err = p.Allocate(s, testCase.pod, &testCase.pod.Spec.InitContainers[i])
+				if !reflect.DeepEqual(err, testCase.expectedError) {
+					t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
+				}
+			}
+
+			for i := range testCase.pod.Spec.Containers {
+				err = p.Allocate(s, testCase.pod, &testCase.pod.Spec.Containers[i])
+				if !reflect.DeepEqual(err, testCase.expectedError) {
+					t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
+				}
+			}
+
+			assignments := s.GetMemoryAssignments()
+			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
+				t.Fatalf("Actual assignments %v are different from the expected %v", assignments, testCase.expectedAssignments)
+			}
+
+			machineState := s.GetMachineState()
+			if !areMachineStatesEqual(machineState, testCase.expectedMachineState) {
+				t.Fatalf("The actual machine state %v is different from the expected %v", machineState, testCase.expectedMachineState)
+			}
+		})
+	}
+}
+
 func TestStaticPolicyRemoveContainer(t *testing.T) {
 	testCases := []testStaticPolicy{
 		{
@@ -2031,15 +2588,7 @@ func TestStaticPolicyRemoveContainer(t *testing.T) {
 				t.Fatalf("Unexpected error: %v", err)
 			}

-			err = p.RemoveContainer(s, "pod1", "container1")
-			if !reflect.DeepEqual(err, testCase.expectedError) {
-				t.Fatalf("The actual error %v is different from the expected one %v", err, testCase.expectedError)
-			}
-
-			if err != nil {
-				return
-			}
-
+			p.RemoveContainer(s, "pod1", "container1")
 			assignments := s.GetMemoryAssignments()
 			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
 				t.Fatalf("Actual assignments %v are different from the expected %v", assignments, testCase.expectedAssignments)