From 9f36f1a1735c66c1b85ea2588ab61a7447534d42 Mon Sep 17 00:00:00 2001
From: Kevin Klues
Date: Wed, 22 May 2019 11:08:49 -0700
Subject: [PATCH] Add tests for proactive init Container removal in the CPUManager static policy

---
 pkg/kubelet/cm/cpumanager/cpu_manager_test.go |  43 ++++
 .../cm/cpumanager/policy_static_test.go       | 227 ++++++++++++++++++
 2 files changed, 270 insertions(+)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index 1bb68dcb772..ea4144567cb 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -140,6 +140,49 @@ func makePod(cpuRequest, cpuLimit string) *v1.Pod {
     }
 }
 
+func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string }) *v1.Pod {
+    pod := &v1.Pod{
+        Spec: v1.PodSpec{
+            InitContainers: []v1.Container{},
+            Containers:     []v1.Container{},
+        },
+    }
+
+    for i, cpu := range initCPUs {
+        pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+            Name: fmt.Sprintf("initContainer-%d", i), // fmt.Sprintf gives a decimal index; string(i) would yield a rune (assumes "fmt" is imported in this file)
+            Resources: v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
+                    v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
+                },
+                Limits: v1.ResourceList{
+                    v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
+                    v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
+                },
+            },
+        })
+    }
+
+    for i, cpu := range appCPUs {
+        pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
+            Name: fmt.Sprintf("appContainer-%d", i), // as above, avoid string(i) on an int
+            Resources: v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
+                    v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
+                },
+                Limits: v1.ResourceList{
+                    v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
+                    v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
+                },
+            },
+        })
+    }
+
+    return pod
+}
+
 func TestCPUManagerAdd(t *testing.T) {
     testPolicy := NewStaticPolicy(
         &topology.CPUTopology{
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go
index d4d84d0da19..9103f9c0c85 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go
@@ -41,6 +41,22 @@ type staticPolicyTest struct {
     expPanic        bool
 }
 
+type staticPolicyMultiContainerTest struct {
+    description      string
+    topo             *topology.CPUTopology
+    numReservedCPUs  int
+    initContainerIDs []string
+    containerIDs     []string
+    stAssignments    state.ContainerCPUAssignments
+    stDefaultCPUSet  cpuset.CPUSet
+    pod              *v1.Pod
+    expErr           error
+    expCPUAlloc      bool
+    expInitCSets     []cpuset.CPUSet
+    expCSets         []cpuset.CPUSet
+    expPanic         bool
+}
+
 func TestStaticPolicyName(t *testing.T) {
     policy := NewStaticPolicy(topoSingleSocketHT, 1)
 
@@ -445,6 +461,217 @@ func TestStaticPolicyAdd(t *testing.T) {
     }
 }
 
+func TestStaticPolicyAddWithInitContainers(t *testing.T) {
+    testCases := []staticPolicyMultiContainerTest{
+        {
+            description: "No Guaranteed Init CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{{"100m", "100m"}},
+                []struct{ request, limit string }{{"4000m", "4000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet()},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+        },
+        {
+            description: "Equal Number of Guaranteed CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{{"4000m", "4000m"}},
+                []struct{ request, limit string }{{"4000m", "4000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+        },
+        {
+            description: "More Init Container Guaranteed CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{{"6000m", "6000m"}},
+                []struct{ request, limit string }{{"4000m", "4000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+        },
+        {
+            description: "Less Init Container Guaranteed CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{{"2000m", "2000m"}},
+                []struct{ request, limit string }{{"4000m", "4000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+        },
+        {
+            description: "Multi Init Container Equal CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"},
+                    {"2000m", "2000m"}},
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4),
+                cpuset.NewCPUSet(0, 4)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4)},
+        },
+        {
+            description: "Multi Init Container Less CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{
+                    {"4000m", "4000m"},
+                    {"4000m", "4000m"}},
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5),
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4)},
+        },
+        {
+            description: "Multi Init Container More CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"},
+                    {"2000m", "2000m"}},
+                []struct{ request, limit string }{
+                    {"4000m", "4000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4),
+                cpuset.NewCPUSet(0, 4)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+        },
+        {
+            description: "Multi Init Container Increasing CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
+            containerIDs: []string{"appFakeID"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"},
+                    {"4000m", "4000m"}},
+                []struct{ request, limit string }{
+                    {"6000m", "6000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4),
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
+        },
+        {
+            description: "Multi Init, Multi App Container Split CPUs",
+            topo: topoSingleSocketHT,
+            numReservedCPUs: 0,
+            stAssignments: state.ContainerCPUAssignments{},
+            stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+            initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
+            containerIDs: []string{"appFakeID-1", "appFakeID-2"},
+            pod: makeMultiContainerPod(
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"},
+                    {"4000m", "4000m"}},
+                []struct{ request, limit string }{
+                    {"2000m", "2000m"},
+                    {"2000m", "2000m"}}),
+            expInitCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4),
+                cpuset.NewCPUSet(0, 4, 1, 5)},
+            expCSets: []cpuset.CPUSet{
+                cpuset.NewCPUSet(0, 4),
+                cpuset.NewCPUSet(1, 5)},
+        },
+    }
+
+    for _, testCase := range testCases {
+        policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs)
+
+        st := &mockState{
+            assignments: testCase.stAssignments,
+            defaultCPUSet: testCase.stDefaultCPUSet,
+        }
+
+        containers := append(
+            testCase.pod.Spec.InitContainers,
+            testCase.pod.Spec.Containers...)
+
+        containerIDs := append(
+            testCase.initContainerIDs,
+            testCase.containerIDs...)
+
+        expCSets := append(
+            testCase.expInitCSets,
+            testCase.expCSets...)
+
+        for i := range containers {
+            err := policy.AddContainer(st, testCase.pod, &containers[i], containerIDs[i])
+            if err != nil {
+                t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v",
+                    testCase.description, containerIDs[i], err)
+            }
+
+            cset, found := st.assignments[containerIDs[i]]
+            if !expCSets[i].IsEmpty() && !found {
+                t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
+                    testCase.description, containerIDs[i], st.assignments)
+            }
+
+            if found && !cset.Equals(expCSets[i]) {
+                t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v",
+                    testCase.description, expCSets[i], containerIDs[i], cset)
+            }
+        }
+    }
+}
+
 func TestStaticPolicyRemove(t *testing.T) {
     testCases := []staticPolicyTest{
         {
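
To exercise only the new table-driven test, something like the following should work from the root of a Kubernetes checkout (the package path and test name are the ones introduced in the diff above):

    go test ./pkg/kubelet/cm/cpumanager/ -run TestStaticPolicyAddWithInitContainers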