Merge pull request #97415 from AlexeyPerevalov/ExcludeSharedPoolFromPodResources

Return only isolated cpus in podresources interface
Authored by Kubernetes Prow Robot on 2021-10-08 05:58:58 -07:00; committed by GitHub
commit 2face135c7
5 changed files with 115 additions and 60 deletions
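In effect, the podresources List endpoint stops reporting shared-pool CPUs: a container only shows CPU IDs if the CPU manager granted it exclusive cores. A minimal consumer-side sketch of that behaviour, assuming an already-connected kubeletpodresourcesv1.PodResourcesListerClient (the printExclusiveCPUs helper and the omitted client setup are illustrative, not part of this change):

package main

import (
	"context"
	"fmt"

	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)

// printExclusiveCPUs prints, per container, what the kubelet now returns in
// CpuIds: only exclusively allocated CPUs, never the shared pool.
func printExclusiveCPUs(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient) error {
	resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
	if err != nil {
		return err
	}
	for _, pod := range resp.GetPodResources() {
		for _, cnt := range pod.GetContainers() {
			// An empty slice now means "runs in the shared pool", not "has no CPUs".
			fmt.Printf("%s/%s: %v\n", pod.GetName(), cnt.GetName(), cnt.GetCpuIds())
		}
	}
	return nil
}

func main() {} // connecting to the kubelet podresources socket is omitted here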


@ -1064,7 +1064,7 @@ func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.Conta
func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
if cm.cpuManager != nil {
- return cm.cpuManager.GetCPUs(podUID, containerName).ToSliceNoSortInt64()
+ return cm.cpuManager.GetExclusiveCPUs(podUID, containerName).ToSliceNoSortInt64()
}
return []int64{}
}
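This GetCPUs method is what backs the podresources v1 server's CPUsProvider (a GetCPUs returning []int64, as the comment in the next file notes). A toy stand-in showing the reporting behaviour after the change; the fakeCM type and its map are hypothetical, not from the tree:

package main

import "fmt"

// CPUsProvider mirrors, in sketch form, the interface the podresources v1
// server uses to query the container manager.
type CPUsProvider interface {
	GetCPUs(podUID, containerName string) []int64
}

// fakeCM stands in for containerManagerImpl: it only knows about exclusive
// allocations, so shared-pool containers yield an empty slice.
type fakeCM struct {
	exclusive map[string][]int64 // "podUID/containerName" -> exclusive CPU IDs
}

func (cm *fakeCM) GetCPUs(podUID, containerName string) []int64 {
	return cm.exclusive[podUID+"/"+containerName]
}

func main() {
	var p CPUsProvider = &fakeCM{exclusive: map[string][]int64{
		"pod-a/cnt-0": {2, 3}, // guaranteed container with 2 exclusive CPUs
	}}
	fmt.Println(p.GetCPUs("pod-a", "cnt-0")) // [2 3]
	fmt.Println(p.GetCPUs("pod-b", "cnt-0")) // [] -> shared pool only, nothing reported
}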


@ -77,9 +77,9 @@ type Manager interface {
// and other resource controllers.
GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
- // GetCPUs implements the podresources.CPUsProvider interface to provide allocated
- // cpus for the container
- GetCPUs(podUID, containerName string) cpuset.CPUSet
+ // GetExclusiveCPUs implements the podresources.CPUsProvider interface to provide
+ // exclusively allocated cpus for the container
+ GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet
// GetPodTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment per Pod
@ -88,6 +88,10 @@ type Manager interface {
// GetAllocatableCPUs returns the assignable (not allocated) CPUs
GetAllocatableCPUs() cpuset.CPUSet
+ // GetCPUAffinity returns cpuset which includes cpus from shared pools
+ // as well as exclusively allocated cpus
+ GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
}
type manager struct {
@ -506,7 +510,15 @@ func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet)
})
}
- func (m *manager) GetCPUs(podUID, containerName string) cpuset.CPUSet {
+ func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
+ if result, ok := m.state.GetCPUSet(string(podUID), containerName); ok {
+ return result
+ }
+ return cpuset.CPUSet{}
+ }
+ func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
return m.state.GetCPUSetOrDefault(podUID, containerName)
}
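The two accessors differ only in their fallback: GetCPUSet has an entry only for containers that were granted exclusive CPUs, while GetCPUSetOrDefault falls back to the shared (default) set, so GetCPUAffinity keeps the old GetCPUs behaviour for everything except podresources reporting. A toy model of that split, assuming those state semantics (simplified string-keyed state; the real state is keyed by podUID and containerName and returns cpuset.CPUSet):

package main

import "fmt"

// fakeState sketches the cpumanager state semantics assumed above: explicit
// assignments exist only for containers with exclusive CPUs.
type fakeState struct {
	assignments map[string]string // container key -> exclusive cpuset, e.g. "2-3"
	defaultSet  string            // shared pool, e.g. "0-1"
}

func (s *fakeState) GetCPUSet(key string) (string, bool) {
	v, ok := s.assignments[key]
	return v, ok
}

func (s *fakeState) GetCPUSetOrDefault(key string) string {
	if v, ok := s.GetCPUSet(key); ok {
		return v
	}
	return s.defaultSet
}

func main() {
	st := &fakeState{assignments: map[string]string{"pod-a/cnt-0": "2-3"}, defaultSet: "0-1"}
	fmt.Println(st.GetCPUSetOrDefault("pod-a/cnt-0")) // "2-3": affinity of a container with exclusive CPUs
	fmt.Println(st.GetCPUSetOrDefault("pod-b/cnt-0")) // "0-1": affinity falls back to the shared pool
	_, ok := st.GetCPUSet("pod-b/cnt-0")
	fmt.Println(ok) // false: no exclusive CPUs, so podresources reports none
}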


@ -70,8 +70,8 @@ func (m *fakeManager) State() state.Reader {
return m.state
}
- func (m *fakeManager) GetCPUs(podUID, containerName string) cpuset.CPUSet {
- klog.InfoS("GetCPUs", "podUID", podUID, "containerName", containerName)
+ func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
+ klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName)
return cpuset.CPUSet{}
}
@ -80,6 +80,11 @@ func (m *fakeManager) GetAllocatableCPUs() cpuset.CPUSet {
return cpuset.CPUSet{}
}
+ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
+ klog.InfoS("GetCPUAffinity", "podUID", podUID, "containerName", containerName)
+ return cpuset.CPUSet{}
+ }
// NewFakeManager creates empty/fake cpu manager
func NewFakeManager() Manager {
return &fakeManager{


@ -29,7 +29,7 @@ import (
func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
if i.cpuManager != nil {
- allocatedCPUs := i.cpuManager.GetCPUs(string(pod.UID), container.Name)
+ allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name)
if !allocatedCPUs.IsEmpty() {
containerConfig.Linux.Resources.CpusetCpus = allocatedCPUs.String()
}
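Container pinning itself is meant to be unchanged: GetCPUAffinity still includes the shared pool, so the cpuset written into the CRI config is never narrowed to just the exclusive CPUs. A rough illustration with a local stand-in type (the real code fills the CRI LinuxContainerResources message), assuming a shared pool of 0-1 and exclusive CPUs 2-3 for this container:

package main

import "fmt"

// linuxResources stands in for the CRI LinuxContainerResources message.
type linuxResources struct{ CpusetCpus string }

func main() {
	affinity := "0-3"  // GetCPUAffinity: shared pool 0-1 plus exclusive CPUs 2-3
	exclusive := "2-3" // GetExclusiveCPUs: what podresources now reports

	res := linuxResources{}
	if affinity != "" { // mirrors the !allocatedCPUs.IsEmpty() guard above
		res.CpusetCpus = affinity
	}
	fmt.Println(res.CpusetCpus, "pinned;", exclusive, "reported") // 0-3 pinned; 2-3 reported
}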


@ -53,7 +53,7 @@ type podDesc struct {
cntName string
resourceName string
resourceAmount int
- cpuCount int
+ cpuRequest int // cpuRequest is in millicores
}
func makePodResourcesTestPod(desc podDesc) *v1.Pod {
@ -66,9 +66,10 @@ func makePodResourcesTestPod(desc podDesc) *v1.Pod {
},
Command: []string{"sh", "-c", "sleep 1d"},
}
- if desc.cpuCount > 0 {
- cnt.Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
- cnt.Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
+ if desc.cpuRequest > 0 {
+ cpuRequestQty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
+ cnt.Resources.Requests[v1.ResourceCPU] = *cpuRequestQty
+ cnt.Resources.Limits[v1.ResourceCPU] = *cpuRequestQty
// we don't really care, we only need to be in guaranteed QoS
cnt.Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi")
cnt.Resources.Limits[v1.ResourceMemory] = resource.MustParse("100Mi")
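Expressing the request in millicores is what lets the new tests ask for fractional CPUs (e.g. 1500m), which parsing a whole-number string could not. A small sketch of the Quantity calls involved; the values are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// 1500 millicores is 1.5 CPUs; such a container cannot get exclusive cores.
	fractional := resource.NewMilliQuantity(1500, resource.DecimalSI)
	fmt.Println(fractional.String()) // "1500m"

	// 2000 millicores is 2 whole CPUs; in a Guaranteed pod with the static
	// CPU manager policy this is eligible for 2 exclusive cores.
	integral := resource.NewMilliQuantity(2000, resource.DecimalSI)
	fmt.Println(integral.MilliValue()) // 2000
}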
@ -186,13 +187,14 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
if !ok {
return fmt.Errorf("no container resources for pod %q container %q", podReq.podName, podReq.cntName)
}
- if podReq.cpuCount > 0 {
- if len(cntInfo.CpuIds) != podReq.cpuCount {
- return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuCount, cntInfo.CpuIds)
+ if podReq.cpuRequest > 0 {
+ if isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != int(podReq.cpuRequest/1000) {
+ return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
}
+ if !isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != 0 {
+ return fmt.Errorf("pod %q container %q requested %d expected to be allocated CPUs from shared pool %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
+ }
}
if podReq.resourceName != "" && podReq.resourceAmount > 0 {
dev := findContainerDeviceByName(cntInfo.GetDevices(), podReq.resourceName)
if dev == nil {
@ -288,19 +290,19 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 2,
+ cpuRequest: 2000,
},
{
podName: "pod-02",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-02",
cntName: "cnt-00",
cpuRequest: 2000,
},
{
podName: "pod-03",
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 1,
+ cpuRequest: 1000,
},
}
} else {
@ -310,19 +312,19 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
},
{
podName: "pod-01",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-01",
cntName: "cnt-00",
cpuRequest: 2000,
},
{
podName: "pod-02",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-02",
cntName: "cnt-00",
cpuRequest: 2000,
},
{
podName: "pod-03",
cntName: "cnt-00",
cpuCount: 1,
podName: "pod-03",
cntName: "cnt-00",
cpuRequest: 1000,
},
}
@ -344,12 +346,12 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 2,
+ cpuRequest: 2000,
},
{
podName: "pod-02",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-02",
cntName: "cnt-00",
cpuRequest: 2000,
},
}
} else {
@ -359,14 +361,14 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
},
{
podName: "pod-01",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-01",
cntName: "cnt-00",
cpuRequest: 2000,
},
{
podName: "pod-02",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-02",
cntName: "cnt-00",
cpuRequest: 2000,
},
}
}
@ -380,13 +382,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 1,
+ cpuRequest: 1000,
}
} else {
extra = podDesc{
podName: "pod-03",
cntName: "cnt-00",
cpuCount: 1,
podName: "pod-03",
cntName: "cnt-00",
cpuRequest: 1000,
}
}
@ -405,16 +407,16 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
if sd != nil {
expected = []podDesc{
{
podName: "pod-00",
cntName: "cnt-00",
cpuCount: 1,
podName: "pod-00",
cntName: "cnt-00",
cpuRequest: 1000,
},
{
podName: "pod-01",
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 2,
+ cpuRequest: 2000,
},
{
podName: "pod-02",
@ -425,29 +427,29 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
resourceName: sd.resourceName,
resourceAmount: 1,
- cpuCount: 1,
+ cpuRequest: 1000,
},
}
} else {
expected = []podDesc{
{
podName: "pod-00",
cntName: "cnt-00",
cpuCount: 1,
podName: "pod-00",
cntName: "cnt-00",
cpuRequest: 1000,
},
{
podName: "pod-01",
cntName: "cnt-00",
cpuCount: 2,
podName: "pod-01",
cntName: "cnt-00",
cpuRequest: 1000,
},
{
podName: "pod-02",
cntName: "cnt-00",
},
{
podName: "pod-03",
cntName: "cnt-00",
cpuCount: 1,
podName: "pod-03",
cntName: "cnt-00",
cpuRequest: 1000,
},
}
}
@ -458,6 +460,38 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
expectedPostDelete := filterOutDesc(expected, "pod-01")
expectPodResources(1, cli, expectedPostDelete)
tpd.deletePodsForTest(f)
+ tpd = newTestPodData()
+ ginkgo.By("checking the output when pods request non integral CPUs")
+ if sd != nil {
+ expected = []podDesc{
+ {
+ podName: "pod-00",
+ cntName: "cnt-00",
+ cpuRequest: 1500,
+ },
+ {
+ podName: "pod-01",
+ cntName: "cnt-00",
+ resourceName: sd.resourceName,
+ resourceAmount: 1,
+ cpuRequest: 1500,
+ },
+ }
+ } else {
+ expected = []podDesc{
+ {
+ podName: "pod-00",
+ cntName: "cnt-00",
+ cpuRequest: 1500,
+ },
+ }
+ }
+ tpd.createPodsForTest(f, expected)
+ expectPodResources(1, cli, expected)
+ tpd.deletePodsForTest(f)
}
func podresourcesGetAllocatableResourcesTests(f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
@ -720,7 +754,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
cntName: "cnt-01",
resourceName: KubeVirtResourceName,
resourceAmount: 1,
- cpuCount: 1,
+ cpuRequest: 1000,
}
tpd := newTestPodData()
@ -907,3 +941,7 @@ func getKubeVirtDevicePluginPod() *v1.Pod {
return p
}
+ func isIntegral(cpuRequest int) bool {
+ return (cpuRequest % 1000) == 0
+ }
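isIntegral treats cpuRequest as millicores: only whole-CPU requests in a Guaranteed pod can be granted exclusive cores, which is why the matcher above expects cpuRequest/1000 CPU IDs for integral requests and none otherwise. A small standalone usage sketch that restates the helper (the loop and output are illustrative, not part of the test):

package main

import "fmt"

// isIntegral reports whether a millicore request is a whole number of CPUs,
// matching the helper added in this test file.
func isIntegral(cpuRequest int) bool {
	return (cpuRequest % 1000) == 0
}

func main() {
	for _, req := range []int{1000, 2000, 1500} {
		exclusive := 0
		if isIntegral(req) {
			exclusive = req / 1000
		}
		// 1000m -> 1 exclusive CPU, 2000m -> 2, 1500m -> 0 (shared pool only)
		fmt.Printf("%dm: %d exclusive CPUs expected in CpuIds\n", req, exclusive)
	}
}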