Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 18:24:07 +00:00)
node: podresources: make GetCPUs return cpuset
An upcoming patch wants to add GetAllocatableCPUs() returning a cpuset. To make the code consistent and a bit more flexible, we change the existing interface to also return a cpuset.

Signed-off-by: Francesco Romani <fromani@redhat.com>
This commit is contained in:
parent 4e7434028c
commit 1375c5bdc7
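The net effect: kubelet-internal providers now hand back a cpuset.CPUSet, and only the v1 podresources server flattens it into the protobuf-friendly []int64. A minimal sketch of that shape, paraphrasing the diff below (cpuIDsForResponse is a made-up helper, not part of the commit):

package sketch

import "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

// CPUsProvider as it looks after this change; it previously returned []int64.
type CPUsProvider interface {
	GetCPUs(podUID, containerName string) cpuset.CPUSet
}

// cpuIDsForResponse shows where the conversion now lives: the v1 server
// calls ToSliceNoSortInt64() when it fills ContainerResources.CpuIds.
func cpuIDsForResponse(p CPUsProvider, podUID, containerName string) []int64 {
	return p.GetCPUs(podUID, containerName).ToSliceNoSortInt64()
}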
@@ -60,7 +60,7 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
 			pRes.Containers[j] = &v1.ContainerResources{
 				Name:    container.Name,
 				Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
-				CpuIds:  p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
+				CpuIds:  p.cpusProvider.GetCPUs(string(pod.UID), container.Name).ToSliceNoSortInt64(),
 			}
 		}
 		podResources[i] = &pRes
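As the name suggests, ToSliceNoSortInt64 emits the CPU IDs as int64 values without a defined order, so CpuIds can come back in any permutation; this is why the test below stops comparing responses via String() and switches to order-insensitive helpers. A tiny sketch, not part of the commit:

cset := cpuset.NewCPUSet(12, 23, 30)
ids := cset.ToSliceNoSortInt64() // e.g. [23 12 30]; ordering is not guaranteed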
@@ -18,12 +18,15 @@ package podresources
 
 import (
 	"context"
+	"reflect"
+	"sort"
 	"testing"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
 func TestListPodResourcesV1(t *testing.T) {
@@ -41,20 +44,20 @@ func TestListPodResourcesV1(t *testing.T) {
 		},
 	}
 
-	cpus := []int64{12, 23, 30}
+	cpus := cpuset.NewCPUSet(12, 23, 30)
 
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
 		devices          []*podresourcesapi.ContainerDevices
-		cpus             []int64
+		cpus             cpuset.CPUSet
 		expectedResponse *podresourcesapi.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
 			devices:          []*podresourcesapi.ContainerDevices{},
-			cpus:             []int64{},
+			cpus:             cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
 		},
 		{
@@ -76,7 +79,7 @@ func TestListPodResourcesV1(t *testing.T) {
 				},
 			},
 			devices: []*podresourcesapi.ContainerDevices{},
-			cpus:    []int64{},
+			cpus:    cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
 					{
@@ -121,7 +124,7 @@ func TestListPodResourcesV1(t *testing.T) {
 							{
 								Name:    containerName,
 								Devices: devs,
-								CpuIds:  cpus,
+								CpuIds:  cpus.ToSliceNoSortInt64(),
 							},
 						},
 					},
@@ -140,9 +143,91 @@ func TestListPodResourcesV1(t *testing.T) {
 			if err != nil {
 				t.Errorf("want err = %v, got %q", nil, err)
 			}
-			if tc.expectedResponse.String() != resp.String() {
+			if !equalListResponse(tc.expectedResponse, resp) {
 				t.Errorf("want resp = %s, got %s", tc.expectedResponse.String(), resp.String())
 			}
 		})
 	}
 }
+
+func equalListResponse(respA, respB *podresourcesapi.ListPodResourcesResponse) bool {
+	if len(respA.PodResources) != len(respB.PodResources) {
+		return false
+	}
+	for idx := 0; idx < len(respA.PodResources); idx++ {
+		podResA := respA.PodResources[idx]
+		podResB := respB.PodResources[idx]
+		if podResA.Name != podResB.Name {
+			return false
+		}
+		if podResA.Namespace != podResB.Namespace {
+			return false
+		}
+		if len(podResA.Containers) != len(podResB.Containers) {
+			return false
+		}
+		for jdx := 0; jdx < len(podResA.Containers); jdx++ {
+			cntA := podResA.Containers[jdx]
+			cntB := podResB.Containers[jdx]
+
+			if cntA.Name != cntB.Name {
+				return false
+			}
+			if !equalInt64s(cntA.CpuIds, cntB.CpuIds) {
+				return false
+			}
+
+			if len(cntA.Devices) != len(cntB.Devices) {
+				return false
+			}
+
+			for kdx := 0; kdx < len(cntA.Devices); kdx++ {
+				cntDevA := cntA.Devices[kdx]
+				cntDevB := cntB.Devices[kdx]
+
+				if cntDevA.ResourceName != cntDevB.ResourceName {
+					return false
+				}
+				if !equalTopology(cntDevA.Topology, cntDevB.Topology) {
+					return false
+				}
+				if !equalStrings(cntDevA.DeviceIds, cntDevB.DeviceIds) {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+func equalInt64s(a, b []int64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	aCopy := append([]int64{}, a...)
+	sort.Slice(aCopy, func(i, j int) bool { return aCopy[i] < aCopy[j] })
+	bCopy := append([]int64{}, b...)
+	sort.Slice(bCopy, func(i, j int) bool { return bCopy[i] < bCopy[j] })
+	return reflect.DeepEqual(aCopy, bCopy)
+}
+
+func equalStrings(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	aCopy := append([]string{}, a...)
+	sort.Strings(aCopy)
+	bCopy := append([]string{}, b...)
+	sort.Strings(bCopy)
+	return reflect.DeepEqual(aCopy, bCopy)
+}
+
+func equalTopology(a, b *podresourcesapi.TopologyInfo) bool {
+	if a == nil && b != nil {
+		return false
+	}
+	if a != nil && b == nil {
+		return false
+	}
+	return reflect.DeepEqual(a, b)
+}
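The helpers above treat CpuIds and DeviceIds as sets: each slice is copied, sorted, and then compared with reflect.DeepEqual. For instance (illustrative values, not from the commit):

equalInt64s([]int64{30, 12, 23}, []int64{12, 23, 30}) // true: same IDs, order ignored
equalInt64s([]int64{12, 23}, []int64{12, 23, 30})     // false: lengths differ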
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
 type mockProvider struct {
@@ -43,9 +44,9 @@ func (m *mockProvider) GetDevices(podUID, containerName string) []*podresourcesv
 	return args.Get(0).([]*podresourcesv1.ContainerDevices)
 }
 
-func (m *mockProvider) GetCPUs(podUID, containerName string) []int64 {
+func (m *mockProvider) GetCPUs(podUID, containerName string) cpuset.CPUSet {
 	args := m.Called(podUID, containerName)
-	return args.Get(0).([]int64)
+	return args.Get(0).(cpuset.CPUSet)
 }
 
 func (m *mockProvider) UpdateAllocatedDevices() {
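The m.Called / args.Get pattern points to a testify-style mock (github.com/stretchr/testify/mock); assuming that, a test now stubs GetCPUs with a set rather than a slice, roughly like this (pod UID and container name are placeholders):

m := new(mockProvider)
m.On("GetCPUs", "pod-uid", "container-x").Return(cpuset.NewCPUSet(12, 23, 30))
// The code under test calls m.GetCPUs(...), and the mock hands the value
// back through args.Get(0).(cpuset.CPUSet) as defined above.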
@@ -19,6 +19,7 @@ package podresources
 import (
 	"k8s.io/api/core/v1"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
 // DevicesProvider knows how to provide the devices used by the given container
@@ -34,5 +35,5 @@ type PodsProvider interface {
 
 // CPUsProvider knows how to provide the cpus used by the given container
 type CPUsProvider interface {
-	GetCPUs(podUID, containerName string) []int64
+	GetCPUs(podUID, containerName string) cpuset.CPUSet
 }
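Implementers of the interface now return the set itself. A minimal, hypothetical provider for illustration only (fixedCPUsProvider is not part of the commit; it assumes the cpuset import shown above):

// fixedCPUsProvider reports the same CPU assignment for every container.
type fixedCPUsProvider struct {
	set cpuset.CPUSet
}

// GetCPUs satisfies CPUsProvider; Clone returns an independent copy so the
// caller cannot alias the provider's internal state.
func (p *fixedCPUsProvider) GetCPUs(podUID, containerName string) cpuset.CPUSet {
	return p.set.Clone()
}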
@@ -107,7 +107,7 @@ type ContainerManager interface {
 	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
 
 	// GetCPUs returns information about the cpus assigned to pods and containers
-	GetCPUs(podUID, containerName string) []int64
+	GetCPUs(podUID, containerName string) cpuset.CPUSet
 
 	// ShouldResetExtendedResourceCapacity returns whether or not the extended resources should be zeroed,
 	// due to node recreation.
@@ -52,6 +52,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@@ -1072,8 +1073,8 @@ func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podr
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }
 
-func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
-	return cm.cpuManager.GetCPUs(podUID, containerName).ToSliceNoSortInt64()
+func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) cpuset.CPUSet {
+	return cm.cpuManager.GetCPUs(podUID, containerName).Clone()
 }
 
 func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
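Note the implementation detail: the container manager now returns cm.cpuManager.GetCPUs(...).Clone() instead of flattening to int64 IDs, so callers presumably get their own copy of the assignment rather than a view into the CPU manager's state, and the conversion to the wire format happens exactly once, in the v1 server shown in the first hunk.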
@@ -24,6 +24,7 @@ import (
 	internalapi "k8s.io/cri-api/pkg/apis"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
@@ -126,8 +127,8 @@ func (cm *containerManagerStub) UpdateAllocatedDevices() {
 	return
 }
 
-func (cm *containerManagerStub) GetCPUs(_, _ string) []int64 {
-	return nil
+func (cm *containerManagerStub) GetCPUs(_, _ string) cpuset.CPUSet {
+	return cpuset.CPUSet{}
 }
 
 func NewStubContainerManager() ContainerManager {
@@ -232,6 +232,6 @@ func (cm *containerManagerImpl) UpdateAllocatedDevices() {
 	return
 }
 
-func (cm *containerManagerImpl) GetCPUs(_, _ string) []int64 {
-	return nil
+func (cm *containerManagerImpl) GetCPUs(_, _ string) cpuset.CPUSet {
+	return cpuset.CPUSet{}
 }
@@ -25,6 +25,7 @@ import (
 	internalapi "k8s.io/cri-api/pkg/apis"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
@@ -195,9 +196,9 @@ func (cm *FakeContainerManager) UpdateAllocatedDevices() {
 	return
 }
 
-func (cm *FakeContainerManager) GetCPUs(_, _ string) []int64 {
+func (cm *FakeContainerManager) GetCPUs(_, _ string) cpuset.CPUSet {
 	cm.Lock()
 	defer cm.Unlock()
 	cm.CalledFunctions = append(cm.CalledFunctions, "GetCPUs")
-	return nil
+	return cpuset.CPUSet{}
 }