Add tests for the static cpumanager policy.

Balaji Subramaniam 2017-08-22 23:50:07 -07:00 committed by Connor Doyle
parent d0bcbbb437
commit 5b5958ecec
4 changed files with 805 additions and 0 deletions


@@ -29,13 +29,17 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"cpu_assignment_test.go",
"cpu_manager_test.go",
"policy_none_test.go",
"policy_static_test.go",
"policy_test.go",
],
library = ":go_default_library",
deps = [
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/cm/cpumanager/state:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",


@@ -0,0 +1,385 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cpumanager

import (
	"reflect"
	"testing"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
func TestCPUAccumulatorFreeSockets(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 1 socket free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0},
},
{
"single socket HT, 0 sockets free",
topoSingleSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
[]int{},
},
{
"dual socket HT, 2 sockets free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 1},
},
{
"dual socket HT, 1 socket free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{1},
},
{
"dual socket HT, 0 sockets free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeSockets()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
func TestCPUAccumulatorFreeCores(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 4 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 1, 2, 3},
},
{
"single socket HT, 3 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 3 cores free (1 partially consumed)",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 0 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(),
[]int{},
},
{
"single socket HT, 0 cores free (4 partially consumed)",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3),
[]int{},
},
{
"dual socket HT, 6 cores free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 2, 4, 1, 3, 5},
},
{
"dual socket HT, 5 cores free (1 consumed from socket 0)",
topoDualSocketHT,
cpuset.NewCPUSet(2, 1, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 4, 1, 3, 5},
},
{
"dual socket HT, 4 cores free (1 consumed from each socket)",
topoDualSocketHT,
cpuset.NewCPUSet(2, 3, 4, 5, 8, 9, 10, 11),
[]int{2, 4, 3, 5},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCores()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
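
// The expected orderings below are topology-aware rather than simply numeric: siblings of
// the same physical core stay adjacent, cores of the same socket stay grouped, and CPUs
// sharing a core or socket with already-consumed CPUs sort ahead of fully free cores
// (e.g. [6 2 8 4 10 ...] once CPU 0 is gone from topoDualSocketHT).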
func TestCPUAccumulatorFreeCPUs(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 8 cpus free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 4, 1, 5, 2, 6, 3, 7},
},
{
"single socket HT, 5 cpus free",
topoSingleSocketHT,
cpuset.NewCPUSet(3, 4, 5, 6, 7),
[]int{4, 5, 6, 3, 7},
},
{
"dual socket HT, 12 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 11 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 10 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCPUs()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
func TestCPUAccumulatorTake(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
takeCPUs []cpuset.CPUSet
numCPUs int
expectSatisfied bool
expectFailed bool
}{
{
"take 0 cpus from a single socket HT, require 1",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet()},
1,
false,
false,
},
{
"take 0 cpus from a single socket HT, require 1, none available",
topoSingleSocketHT,
cpuset.NewCPUSet(),
[]cpuset.CPUSet{cpuset.NewCPUSet()},
1,
false,
true,
},
{
"take 1 cpu from a single socket HT, require 1",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
1,
true,
false,
},
{
"take 1 cpu from a single socket HT, require 2",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
2,
false,
false,
},
{
"take 2 cpu from a single socket HT, require 4, expect failed",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2),
[]cpuset.CPUSet{cpuset.NewCPUSet(0), cpuset.NewCPUSet(1)},
4,
false,
true,
},
{
"take all cpus one at a time from a single socket HT, require 8",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{
cpuset.NewCPUSet(0),
cpuset.NewCPUSet(1),
cpuset.NewCPUSet(2),
cpuset.NewCPUSet(3),
cpuset.NewCPUSet(4),
cpuset.NewCPUSet(5),
cpuset.NewCPUSet(6),
cpuset.NewCPUSet(7),
},
8,
true,
false,
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs)
totalTaken := 0
for _, cpus := range tc.takeCPUs {
acc.take(cpus)
totalTaken += cpus.Size()
}
if tc.expectSatisfied != acc.isSatisfied() {
t.Errorf("[%s] expected acc.isSatisfied() to be %t", tc.description, tc.expectSatisfied)
}
if tc.expectFailed != acc.isFailed() {
t.Errorf("[%s] expected acc.isFailed() to be %t", tc.description, tc.expectFailed)
}
for _, cpus := range tc.takeCPUs {
availableCPUs := acc.details.CPUs()
if cpus.Intersection(availableCPUs).Size() > 0 {
t.Errorf("[%s] expected intersection of taken cpus [%s] and acc.details.CPUs() [%s] to be empty", tc.description, cpus, availableCPUs)
}
if !cpus.IsSubsetOf(acc.result) {
t.Errorf("[%s] expected [%s] to be a subset of acc.result [%s]", tc.description, cpus, acc.result)
}
}
expNumCPUsNeeded := tc.numCPUs - totalTaken
if acc.numCPUsNeeded != expNumCPUsNeeded {
t.Errorf("[%s] expected acc.numCPUsNeeded to be %d (got %d)", tc.description, expNumCPUsNeeded, acc.numCPUsNeeded)
}
}
}
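
// The cases below show the allocation preference order: takeByTopology grabs a whole free
// socket when the request is large enough (6 CPUs on topoDualSocketHT -> all of socket 0),
// then whole free cores (2 CPUs -> both siblings of one core), and only then individual
// threads, returning "not enough cpus available to satisfy request" when the free set is
// too small.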
func TestTakeByTopology(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
numCPUs int
expErr string
expResult cpuset.CPUSet
}{
{
"take more cpus than are available from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 2, 4, 6),
5,
"not enough cpus available to satisfy request",
cpuset.NewCPUSet(),
},
{
"take zero cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
0,
"",
cpuset.NewCPUSet(),
},
{
"take one cpu from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
1,
"",
cpuset.NewCPUSet(0),
},
{
"take one cpu from single socket with HT, some cpus are taken",
topoSingleSocketHT,
cpuset.NewCPUSet(1, 3, 5, 6, 7),
1,
"",
cpuset.NewCPUSet(6),
},
{
"take two cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
2,
"",
cpuset.NewCPUSet(0, 4),
},
{
"take all cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
8,
"",
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
},
{
"take two cpus from single socket with HT, only one core totally free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 6),
2,
"",
cpuset.NewCPUSet(2, 6),
},
{
"take three cpus from dual socket with HT - core from Socket 0",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
cpuset.NewCPUSet(2),
},
{
"take a socket of cpus from dual socket with HT",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
6,
"",
cpuset.NewCPUSet(0, 2, 4, 6, 8, 10),
},
}
for _, tc := range testCases {
result, err := takeByTopology(tc.topo, tc.availableCPUs, tc.numCPUs)
if tc.expErr != "" && err.Error() != tc.expErr {
t.Errorf("expected error to be [%v] but it was [%v] in test \"%s\"", tc.expErr, err, tc.description)
}
if !result.Equals(tc.expResult) {
t.Errorf("expected result [%s] to equal [%s] in test \"%s\"", result, tc.expResult, tc.description)
}
}
}


@@ -0,0 +1,341 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cpumanager

import (
	"fmt"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
containerID string
stAssignments map[string]cpuset.CPUSet
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
expErr error
expCPUAlloc bool
expCSet cpuset.CPUSet
}
func TestStaticPolicyName(t *testing.T) {
policy := NewStaticPolicy(topoSingleSocketHT, 1)
policyName := policy.Name()
if policyName != "static" {
t.Errorf("StaticPolicy Name() error. expected: static, returned: %v",
policyName)
}
}
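
// mockState below is a test double for the cpumanager state; it is defined elsewhere in
// this package's tests and is not part of this diff. As a rough, assumed sketch, it simply
// wraps the two fields the cases in this file pre-populate:
//
//	type mockState struct {
//		assignments   map[string]cpuset.CPUSet // containerID -> exclusively assigned CPUs
//		defaultCPUSet cpuset.CPUSet            // shared pool for all remaining containers
//	}
//
// with accessor methods delegating to those fields so the policy can read and update them.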
func TestStaticPolicyStart(t *testing.T) {
policy := NewStaticPolicy(topoSingleSocketHT, 1).(*staticPolicy)
st := &mockState{
assignments: map[string]cpuset.CPUSet{},
defaultCPUSet: cpuset.NewCPUSet(),
}
policy.Start(st)
for cpuid := 1; cpuid < policy.topology.NumCPUs; cpuid++ {
if !st.defaultCPUSet.Contains(cpuid) {
t.Errorf("StaticPolicy Start() error. expected cpuid %d to be present in defaultCPUSet", cpuid)
}
}
}
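
// makePod is another shared helper from this package's tests (not part of this diff). It
// builds a single-container pod from CPU request/limit strings; an assumed sketch, using
// only the BUILD deps listed above, might look like:
//
//	func makePod(cpuRequest, cpuLimit string) *v1.Pod {
//		return &v1.Pod{
//			Spec: v1.PodSpec{
//				Containers: []v1.Container{{
//					Resources: v1.ResourceRequirements{
//						Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse(cpuRequest)},
//						Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse(cpuLimit)},
//					},
//				}},
//			},
//		}
//	}
//
// The static policy only grants exclusive CPUs to Guaranteed pods requesting whole CPUs,
// which is why the "NonGuPod" and "GuPodNonIntegerCore" cases below expect no allocation.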
func TestStaticPolicyAdd(t *testing.T) {
testCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: map[string]cpuset.CPUSet{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: map[string]cpuset.CPUSet{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(2, 3, 6, 7),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("2000m", "2000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 5),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(2),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("6000m", "6000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 5, 7, 9, 11),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocThreeCores",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(1, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 2, 3, 4, 6, 7, 8, 9, 10, 11),
pod: makePod("6000m", "6000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 3, 4, 8, 9, 10),
},
{
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocOneSocket",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7),
pod: makePod("4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocFourCores",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(4, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 6, 7),
pod: makePod("4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocketOneCore",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(2),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("8000m", "8000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 4, 5, 7, 9, 10, 11),
},
{
description: "NonGuPod, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("1000m", "2000m"),
expErr: nil,
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodNonIntegerCore, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID4",
stAssignments: map[string]cpuset.CPUSet{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("977m", "977m"),
expErr: nil,
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(1, 2, 3, 4, 5, 6),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 7),
pod: makePod("2000m", "2000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: map[string]cpuset.CPUSet{
"fakeID100": cpuset.NewCPUSet(1, 2, 3),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("10000m", "10000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
container := &testCase.pod.Spec.Containers[0]
err := policy.AddContainer(st, testCase.pod, container, testCase.containerID)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected add error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[testCase.containerID]
if !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
if !reflect.DeepEqual(cset, testCase.expCSet) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v but got %v",
testCase.description, testCase.expCSet, cset)
}
if !cset.Intersection(st.defaultCPUSet).IsEmpty() {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v to be disoint from the shared cpuset %v",
testCase.description, cset, st.defaultCPUSet)
}
}
if !testCase.expCPUAlloc {
_, found := st.assignments[testCase.containerID]
if found {
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
}
}
}
func TestStaticPolicyRemove(t *testing.T) {
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5, 6, 7),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
},
{
description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
"fakeID2": cpuset.NewCPUSet(4, 5, 6, 7),
},
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(1, 2, 3),
},
{
description: "SingleSocketHT, DeAllocTwoContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: map[string]cpuset.CPUSet{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
"fakeID2": cpuset.NewCPUSet(2, 4),
},
stDefaultCPUSet: cpuset.NewCPUSet(6, 7),
expCSet: cpuset.NewCPUSet(1, 3, 5, 6, 7),
},
{
description: "SingleSocketHT, NoDeAlloc",
topo: topoSingleSocketHT,
containerID: "fakeID2",
stAssignments: map[string]cpuset.CPUSet{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(2, 4, 6, 7),
expCSet: cpuset.NewCPUSet(2, 4, 6, 7),
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
policy.RemoveContainer(st, testCase.containerID)
if !reflect.DeepEqual(st.defaultCPUSet, testCase.expCSet) {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected default cpuset %v but got %v",
testCase.description, testCase.expCSet, st.defaultCPUSet)
}
if _, found := st.assignments[testCase.containerID]; found {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected containerID %v not be in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
}
}


@@ -0,0 +1,75 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cpumanager

import (
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
)
var (
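// Single socket with hyperthreading: 4 physical cores, 2 hardware threads each.
// CPUs N and N+4 are siblings on core N.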
topoSingleSocketHT = &topology.CPUTopology{
NumCPUs: 8,
NumSockets: 1,
NumCores: 4,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 0, SocketID: 0},
5: {CoreID: 1, SocketID: 0},
6: {CoreID: 2, SocketID: 0},
7: {CoreID: 3, SocketID: 0},
},
}
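// Dual socket with hyperthreading: 6 physical cores, 3 per socket. Even-numbered CPUs
// live on socket 0, odd-numbered CPUs on socket 1, and CPUs N and N+6 are siblings on core N.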
topoDualSocketHT = &topology.CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 1},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
4: {CoreID: 4, SocketID: 0},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 0, SocketID: 0},
7: {CoreID: 1, SocketID: 1},
8: {CoreID: 2, SocketID: 0},
9: {CoreID: 3, SocketID: 1},
10: {CoreID: 4, SocketID: 0},
11: {CoreID: 5, SocketID: 1},
},
}
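// Dual socket without hyperthreading: 8 physical cores, one CPU per core.
// CPUs 0-3 are on socket 0, CPUs 4-7 on socket 1.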
topoDualSocketNoHT = &topology.CPUTopology{
NumCPUs: 8,
NumSockets: 2,
NumCores: 8,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 4, SocketID: 1},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 6, SocketID: 1},
7: {CoreID: 7, SocketID: 1},
},
}
)