Merge pull request #97415 from AlexeyPerevalov/ExcludeSharedPoolFromPodResources

Return only isolated cpus in podresources interface
Kubernetes Prow Robot 2021-10-08 05:58:58 -07:00 committed by GitHub
commit 2face135c7
5 changed files with 115 additions and 60 deletions
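
From a consumer's point of view, the change is visible through the kubelet podresources List API: a container's CpuIds now carries only the CPUs exclusively allocated to it, and a container running on the shared pool reports an empty list. Below is a minimal client sketch of that call, assuming the default kubelet socket path and a plain local gRPC connection (error handling trimmed; this is not the helper the e2e tests use):

package main

import (
    "context"
    "fmt"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
    // Default kubelet podresources socket; the path can differ per deployment.
    conn, err := grpc.Dial("unix:///var/lib/kubelet/pod-resources/kubelet.sock",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    cli := podresourcesapi.NewPodResourcesListerClient(conn)
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    resp, err := cli.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
    if err != nil {
        panic(err)
    }
    for _, pod := range resp.GetPodResources() {
        for _, cnt := range pod.GetContainers() {
            // After this change, CpuIds lists only exclusively allocated CPUs;
            // shared-pool containers report an empty slice.
            fmt.Printf("%s/%s/%s exclusive cpus: %v\n",
                pod.GetNamespace(), pod.GetName(), cnt.GetName(), cnt.GetCpuIds())
        }
    }
}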

@@ -1064,7 +1064,7 @@ func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.Conta
 func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
     if cm.cpuManager != nil {
-        return cm.cpuManager.GetCPUs(podUID, containerName).ToSliceNoSortInt64()
+        return cm.cpuManager.GetExclusiveCPUs(podUID, containerName).ToSliceNoSortInt64()
     }
     return []int64{}
 }

@@ -77,9 +77,9 @@ type Manager interface {
     // and other resource controllers.
     GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint

-    // GetCPUs implements the podresources.CPUsProvider interface to provide allocated
-    // cpus for the container
-    GetCPUs(podUID, containerName string) cpuset.CPUSet
+    // GetExclusiveCPUs implements the podresources.CPUsProvider interface to provide
+    // exclusively allocated cpus for the container
+    GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet

     // GetPodTopologyHints implements the topologymanager.HintProvider Interface
     // and is consulted to achieve NUMA aware resource alignment per Pod
@@ -88,6 +88,10 @@ type Manager interface {
     // GetAllocatableCPUs returns the assignable (not allocated) CPUs
     GetAllocatableCPUs() cpuset.CPUSet
+
+    // GetCPUAffinity returns cpuset which includes cpus from shared pools
+    // as well as exclusively allocated cpus
+    GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
 }

 type manager struct {
@@ -506,7 +510,15 @@ func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet)
     })
 }

-func (m *manager) GetCPUs(podUID, containerName string) cpuset.CPUSet {
+func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
+    if result, ok := m.state.GetCPUSet(string(podUID), containerName); ok {
+        return result
+    }
+    return cpuset.CPUSet{}
+}
+
+func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
     return m.state.GetCPUSetOrDefault(podUID, containerName)
 }
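
The interface now distinguishes two views of a container's CPUs: GetExclusiveCPUs returns only the CPUs pinned to the container (empty when it runs on the shared pool), while GetCPUAffinity still returns everything the container may run on, shared pool included. The former feeds the podresources API, the latter keeps feeding the CRI cpuset (see the lifecycle hunk further below). A small caller sketch, assuming the in-tree package path; the fake manager answers with empty sets for both, so a real manager backed by kubelet state is needed to see them diverge:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
)

// reportCPUs prints the two views the Manager interface now exposes.
func reportCPUs(m cpumanager.Manager, podUID, containerName string) {
    // Exclusively allocated CPUs only; this is what the podresources API reports.
    fmt.Println("exclusive:", m.GetExclusiveCPUs(podUID, containerName).String())
    // Shared-pool CPUs plus any exclusive ones; this is what PreCreateContainer
    // writes into the container's cpuset via the CRI.
    fmt.Println("affinity:", m.GetCPUAffinity(podUID, containerName).String())
}

func main() {
    // NewFakeManager (see the fake manager hunk below) returns empty cpusets.
    m := cpumanager.NewFakeManager()
    reportCPUs(m, "pod-uid", "cnt-00")
}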

@@ -70,8 +70,8 @@ func (m *fakeManager) State() state.Reader {
     return m.state
 }

-func (m *fakeManager) GetCPUs(podUID, containerName string) cpuset.CPUSet {
-    klog.InfoS("GetCPUs", "podUID", podUID, "containerName", containerName)
+func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
+    klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName)
     return cpuset.CPUSet{}
 }
@@ -80,6 +80,11 @@ func (m *fakeManager) GetAllocatableCPUs() cpuset.CPUSet {
     return cpuset.CPUSet{}
 }

+func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
+    klog.InfoS("GetCPUAffinity", "podUID", podUID, "containerName", containerName)
+    return cpuset.CPUSet{}
+}
+
 // NewFakeManager creates empty/fake cpu manager
 func NewFakeManager() Manager {
     return &fakeManager{

@@ -29,7 +29,7 @@ import (
 func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
     if i.cpuManager != nil {
-        allocatedCPUs := i.cpuManager.GetCPUs(string(pod.UID), container.Name)
+        allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name)
         if !allocatedCPUs.IsEmpty() {
             containerConfig.Linux.Resources.CpusetCpus = allocatedCPUs.String()
         }

@@ -53,7 +53,7 @@ type podDesc struct {
     cntName        string
     resourceName   string
     resourceAmount int
-    cpuCount       int
+    cpuRequest     int // cpuRequest is in millicores
 }

 func makePodResourcesTestPod(desc podDesc) *v1.Pod {
@@ -66,9 +66,10 @@ func makePodResourcesTestPod(desc podDesc) *v1.Pod {
         },
         Command: []string{"sh", "-c", "sleep 1d"},
     }
-    if desc.cpuCount > 0 {
-        cnt.Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
-        cnt.Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
+    if desc.cpuRequest > 0 {
+        cpuRequestQty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
+        cnt.Resources.Requests[v1.ResourceCPU] = *cpuRequestQty
+        cnt.Resources.Limits[v1.ResourceCPU] = *cpuRequestQty
         // we don't really care, we only need to be in guaranteed QoS
         cnt.Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi")
         cnt.Resources.Limits[v1.ResourceMemory] = resource.MustParse("100Mi")
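
Switching the test pod builder from MustParse to NewMilliQuantity is what makes fractional requests such as 1500 millicores expressible. A standalone sketch of the quantities it produces, assuming only the standard apimachinery resource package:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // 2000 millicores canonicalizes to "2": two whole CPUs, eligible for exclusive allocation.
    fmt.Println(resource.NewMilliQuantity(2000, resource.DecimalSI).String())
    // 1500 millicores stays "1500m": a fractional request that runs on the shared pool.
    fmt.Println(resource.NewMilliQuantity(1500, resource.DecimalSI).String())
}
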
@@ -186,13 +187,14 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
         if !ok {
             return fmt.Errorf("no container resources for pod %q container %q", podReq.podName, podReq.cntName)
         }
-        if podReq.cpuCount > 0 {
-            if len(cntInfo.CpuIds) != podReq.cpuCount {
-                return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuCount, cntInfo.CpuIds)
+        if podReq.cpuRequest > 0 {
+            if isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != int(podReq.cpuRequest) {
+                return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
+            }
+            if !isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != 0 {
+                return fmt.Errorf("pod %q container %q requested %d expected to be allocated CPUs from shared pool %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
             }
         }
         if podReq.resourceName != "" && podReq.resourceAmount > 0 {
             dev := findContainerDeviceByName(cntInfo.GetDevices(), podReq.resourceName)
             if dev == nil {
@@ -288,19 +290,19 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
                 cntName: "cnt-00",
                 resourceName: sd.resourceName,
                 resourceAmount: 1,
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-02",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-03",
                 cntName: "cnt-00",
                 resourceName: sd.resourceName,
                 resourceAmount: 1,
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
         }
     } else {
@@ -312,17 +314,17 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             {
                 podName: "pod-01",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-02",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-03",
                 cntName: "cnt-00",
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
         }
@@ -344,12 +346,12 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
                 cntName: "cnt-00",
                 resourceName: sd.resourceName,
                 resourceAmount: 1,
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-02",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
         }
     } else {
@@ -361,12 +363,12 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             {
                 podName: "pod-01",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-02",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
         }
     }
@@ -380,13 +382,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             cntName: "cnt-00",
             resourceName: sd.resourceName,
             resourceAmount: 1,
-            cpuCount: 1,
+            cpuRequest: 1000,
         }
     } else {
         extra = podDesc{
             podName: "pod-03",
             cntName: "cnt-00",
-            cpuCount: 1,
+            cpuRequest: 1000,
         }
     }
@@ -407,14 +409,14 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             {
                 podName: "pod-00",
                 cntName: "cnt-00",
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
             {
                 podName: "pod-01",
                 cntName: "cnt-00",
                 resourceName: sd.resourceName,
                 resourceAmount: 1,
-                cpuCount: 2,
+                cpuRequest: 2000,
             },
             {
                 podName: "pod-02",
@@ -425,7 +427,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
                 cntName: "cnt-00",
                 resourceName: sd.resourceName,
                 resourceAmount: 1,
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
         }
     } else {
@@ -433,12 +435,12 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             {
                 podName: "pod-00",
                 cntName: "cnt-00",
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
             {
                 podName: "pod-01",
                 cntName: "cnt-00",
-                cpuCount: 2,
+                cpuRequest: 1000,
             },
             {
                 podName: "pod-02",
@@ -447,7 +449,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
             {
                 podName: "pod-03",
                 cntName: "cnt-00",
-                cpuCount: 1,
+                cpuRequest: 1000,
             },
         }
     }
@@ -458,6 +460,38 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
     expectedPostDelete := filterOutDesc(expected, "pod-01")
     expectPodResources(1, cli, expectedPostDelete)
     tpd.deletePodsForTest(f)
+
+    tpd = newTestPodData()
+    ginkgo.By("checking the output when pods request non integral CPUs")
+    if sd != nil {
+        expected = []podDesc{
+            {
+                podName: "pod-00",
+                cntName: "cnt-00",
+                cpuRequest: 1500,
+            },
+            {
+                podName: "pod-01",
+                cntName: "cnt-00",
+                resourceName: sd.resourceName,
+                resourceAmount: 1,
+                cpuRequest: 1500,
+            },
+        }
+    } else {
+        expected = []podDesc{
+            {
+                podName: "pod-00",
+                cntName: "cnt-00",
+                cpuRequest: 1500,
+            },
+        }
+    }
+    tpd.createPodsForTest(f, expected)
+    expectPodResources(1, cli, expected)
+    tpd.deletePodsForTest(f)
 }

 func podresourcesGetAllocatableResourcesTests(f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
@@ -720,7 +754,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
             cntName: "cnt-01",
             resourceName: KubeVirtResourceName,
             resourceAmount: 1,
-            cpuCount: 1,
+            cpuRequest: 1000,
         }

         tpd := newTestPodData()
@@ -907,3 +941,7 @@ func getKubeVirtDevicePluginPod() *v1.Pod {

     return p
 }
+
+func isIntegral(cpuRequest int) bool {
+    return (cpuRequest % 1000) == 0
+}
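
Since cpuRequest is expressed in millicores, isIntegral is what lets matchPodDescWithResources pick between the two assertions above: whole-CPU requests are expected to show up as exclusive CPU IDs, fractional ones to report none. A trivial standalone illustration (the values mirror the test cases; this snippet is not part of the PR):

package main

import "fmt"

// isIntegral mirrors the helper added above: cpuRequest is in millicores.
func isIntegral(cpuRequest int) bool {
    return (cpuRequest % 1000) == 0
}

func main() {
    fmt.Println(isIntegral(2000)) // true: whole CPUs, CpuIds should list the allocated cores
    fmt.Println(isIntegral(1500)) // false: fractional request, the container stays on the shared pool and CpuIds is empty
}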