Merge pull request #112913 from Garrybest/pr_cpumanager

fix GetAllocatableCPUs in cpumanager
Kubernetes Prow Robot, 2022-10-27 07:20:33 -07:00 (committed by GitHub)
commit ab4907d2f4
4 changed files with 78 additions and 7 deletions
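
In short: before this change, the static policy's GetAllocatableCPUs returned the currently unassigned CPUs, so the set reported as "allocatable" shrank whenever a container was granted exclusive CPUs. The change separates the two meanings: GetAllocatableCPUs now reports every CPU the policy can ever hand out (topology CPUs minus the reserved set), while the old behavior moves to a new GetAvailableCPUs (default/shared set minus the reserved set), which the internal allocation and hint paths now call. A minimal sketch of the two computations, assuming the kubelet's cpuset package is importable and using the same 4-CPU, 1-reserved layout as the new test (the "after allocation" state below is illustrative, not taken from the diff):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	topologyCPUs := cpuset.NewCPUSet(0, 1, 2, 3) // every CPU known to the topology
	reserved := cpuset.NewCPUSet(0)              // system-reserved CPUs
	defaultSet := cpuset.NewCPUSet(0, 3)         // hypothetical shared pool after CPUs 1-2 were allocated exclusively

	allocatable := topologyCPUs.Difference(reserved) // what GetAllocatableCPUs now returns: 1-3, stable over time
	available := defaultSet.Difference(reserved)     // what the renamed GetAvailableCPUs returns: 3, tracks live state

	fmt.Println(allocatable, available)
}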

View File

@@ -25,9 +25,9 @@ import (
	cadvisorapi "github.com/google/cadvisor/info/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/klog/v2"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@@ -86,7 +86,7 @@ type Manager interface {
	// among this and other resource controllers.
	GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint
	// GetAllocatableCPUs returns the assignable (not allocated) CPUs
	// GetAllocatableCPUs returns the total set of CPUs available for allocation.
	GetAllocatableCPUs() cpuset.CPUSet
	// GetCPUAffinity returns cpuset which includes cpus from shared pools

View File

@@ -31,6 +31,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@@ -1364,3 +1365,66 @@ func TestCPUManagerHandlePolicyOptions(t *testing.T) {
	}
}
func TestCPUManagerGetAllocatableCPUs(t *testing.T) {
	nonePolicy, _ := NewNonePolicy(nil)
	staticPolicy, _ := NewStaticPolicy(
		&topology.CPUTopology{
			NumCPUs:    4,
			NumSockets: 1,
			NumCores:   4,
			CPUDetails: map[int]topology.CPUInfo{
				0: {CoreID: 0, SocketID: 0},
				1: {CoreID: 1, SocketID: 0},
				2: {CoreID: 2, SocketID: 0},
				3: {CoreID: 3, SocketID: 0},
			},
		},
		1,
		cpuset.NewCPUSet(0),
		topologymanager.NewFakeManager(),
		nil)

	testCases := []struct {
		description        string
		policy             Policy
		expAllocatableCPUs cpuset.CPUSet
	}{
		{
			description:        "None Policy",
			policy:             nonePolicy,
			expAllocatableCPUs: cpuset.NewCPUSet(),
		},
		{
			description:        "Static Policy",
			policy:             staticPolicy,
			expAllocatableCPUs: cpuset.NewCPUSet(1, 2, 3),
		},
	}

	for _, testCase := range testCases {
		mgr := &manager{
			policy:     testCase.policy,
			activePods: func() []*v1.Pod { return nil },
			state: &mockState{
				assignments:   state.ContainerCPUAssignments{},
				defaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),
			},
			lastUpdateState:   state.NewMemoryState(),
			containerMap:      containermap.NewContainerMap(),
			podStatusProvider: mockPodStatusProvider{},
			sourcesReady:      &sourcesReadyStub{},
		}
		mgr.sourcesReady = &sourcesReadyStub{}
		mgr.allocatableCPUs = testCase.policy.GetAllocatableCPUs(mgr.state)

		pod := makePod("fakePod", "fakeContainer", "2", "2")
		container := &pod.Spec.Containers[0]

		_ = mgr.Allocate(pod, container)

		if !mgr.GetAllocatableCPUs().Equals(testCase.expAllocatableCPUs) {
			t.Errorf("Policy GetAllocatableCPUs() error (%v). expected cpuset %v for container %v but got %v",
				testCase.description, testCase.expAllocatableCPUs, "fakeContainer", mgr.GetAllocatableCPUs())
		}
	}
}
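
To run just this new test locally (assuming a Kubernetes source checkout; the package path follows from the imports above):

go test ./pkg/kubelet/cm/cpumanager/ -run TestCPUManagerGetAllocatableCPUs -v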

View File

@@ -18,6 +18,7 @@ package cpumanager
import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@@ -39,6 +40,6 @@ type Policy interface {
	// and is consulted to achieve NUMA aware resource alignment per Pod
	// among this and other resource controllers.
	GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint
	// GetAllocatableCPUs returns the assignable (not allocated) CPUs
	// GetAllocatableCPUs returns the total set of CPUs available for allocation.
	GetAllocatableCPUs(m state.State) cpuset.CPUSet
}

View File

@@ -21,6 +21,7 @@ import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@@ -223,8 +224,13 @@ func (p *staticPolicy) validateState(s state.State) error {
	return nil
}

// GetAllocatableCPUs returns the set of unassigned CPUs minus the reserved set.
// GetAllocatableCPUs returns the total set of CPUs available for allocation.
func (p *staticPolicy) GetAllocatableCPUs(s state.State) cpuset.CPUSet {
	return p.topology.CPUDetails.CPUs().Difference(p.reserved)
}

// GetAvailableCPUs returns the set of unassigned CPUs minus the reserved set.
func (p *staticPolicy) GetAvailableCPUs(s state.State) cpuset.CPUSet {
	return s.GetDefaultCPUSet().Difference(p.reserved)
}
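
Note the split: GetAllocatableCPUs is now a pure function of the topology and the reservation, so its result stays constant for the lifetime of the policy, while GetAvailableCPUs depends on the live default (shared) set and shrinks as exclusive CPUs are handed out. A rough caller-side sketch, where p and s stand for a configured *staticPolicy and its state.State as constructed in the test above:

	alloc := p.GetAllocatableCPUs(s) // topology CPUs minus reserved, e.g. 1-3 when CPU 0 is reserved
	avail := p.GetAvailableCPUs(s)   // default set minus reserved; shrinks after exclusive allocations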
@@ -324,7 +330,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
	klog.InfoS("AllocateCPUs", "numCPUs", numCPUs, "socket", numaAffinity)

	allocatableCPUs := p.GetAllocatableCPUs(s).Union(reusableCPUs)
	allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs)

	// If there are aligned CPUs in numaAffinity, attempt to take those first.
	result := cpuset.NewCPUSet()
@@ -442,7 +448,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
	}

	// Get a list of available CPUs.
	available := p.GetAllocatableCPUs(s)
	available := p.GetAvailableCPUs(s)

	// Get a list of reusable CPUs (e.g. CPUs reused from initContainers).
	// It should be an empty CPUSet for a newly created pod.
@@ -497,7 +503,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
	}

	// Get a list of available CPUs.
	available := p.GetAllocatableCPUs(s)
	available := p.GetAvailableCPUs(s)

	// Get a list of reusable CPUs (e.g. CPUs reused from initContainers).
	// It should be an empty CPUSet for a newly created pod.