node: podresources: translate types in cm

during the review, we agreed that the manager types
(CPUSet, ResourceDeviceInstances) should not cross the
containermanager API boundary; thus, the ContainerManager layer
is the correct place to do the type conversion

We move the type conversions out of the podresources server
layer, fixing tests accordingly.

Signed-off-by: Francesco Romani <fromani@redhat.com>
This commit is contained in:
Francesco Romani 2021-03-07 19:55:16 +01:00
parent ad68f9588c
commit 8afdf4f146
10 changed files with 136 additions and 166 deletions

View File

@ -19,7 +19,6 @@ package podresources
import (
"context"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubelet/pkg/apis/podresources/v1"
@ -60,8 +59,8 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
for j, container := range pod.Spec.Containers {
pRes.Containers[j] = &v1.ContainerResources{
Name: container.Name,
Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name).ToSliceNoSortInt64(),
Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
}
}
podResources[i] = &pRes
@ -77,32 +76,7 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
return &v1.AllocatableResourcesResponse{
Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetAllocatableDevices()),
CpuIds: p.cpusProvider.GetAllocatableCPUs().ToSliceNoSortInt64(),
Devices: p.devicesProvider.GetAllocatableDevices(),
CpuIds: p.cpusProvider.GetAllocatableCPUs(),
}, nil
}
func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*v1.ContainerDevices {
var respDevs []*v1.ContainerDevices
for resourceName, resourceDevs := range devs {
for devID, dev := range resourceDevs {
for _, node := range dev.GetTopology().GetNodes() {
numaNode := node.GetID()
respDevs = append(respDevs, &v1.ContainerDevices{
ResourceName: resourceName,
DeviceIds: []string{devID},
Topology: &v1.TopologyInfo{
Nodes: []*v1.NUMANode{
{
ID: numaNode,
},
},
},
})
}
}
}
return respDevs
}

View File

@ -25,7 +25,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@ -38,35 +37,28 @@ func TestListPodResourcesV1(t *testing.T) {
containerName := "container-name"
numaID := int64(1)
devs := devicemanager.ResourceDeviceInstances{
"resource": devicemanager.DeviceInstances{
"dev0": pluginapi.Device{
Topology: &pluginapi.TopologyInfo{
Nodes: []*pluginapi.NUMANode{{ID: numaID}},
},
},
"dev1": pluginapi.Device{
Topology: &pluginapi.TopologyInfo{
Nodes: []*pluginapi.NUMANode{{ID: numaID}},
},
},
devs := []*podresourcesapi.ContainerDevices{
{
ResourceName: "resource",
DeviceIds: []string{"dev0", "dev1"},
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
cpus := cpuset.NewCPUSet(12, 23, 30)
cpus := []int64{12, 23, 30}
for _, tc := range []struct {
desc string
pods []*v1.Pod
devices devicemanager.ResourceDeviceInstances
cpus cpuset.CPUSet
devices []*podresourcesapi.ContainerDevices
cpus []int64
expectedResponse *podresourcesapi.ListPodResourcesResponse
}{
{
desc: "no pods",
pods: []*v1.Pod{},
devices: devicemanager.NewResourceDeviceInstances(),
cpus: cpuset.CPUSet{},
devices: []*podresourcesapi.ContainerDevices{},
cpus: []int64{},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
},
{
@ -87,8 +79,8 @@ func TestListPodResourcesV1(t *testing.T) {
},
},
},
devices: devicemanager.NewResourceDeviceInstances(),
cpus: cpuset.CPUSet{},
devices: []*podresourcesapi.ContainerDevices{},
cpus: []int64{},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{
@ -132,8 +124,8 @@ func TestListPodResourcesV1(t *testing.T) {
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: containerDevicesFromResourceDeviceInstances(devs),
CpuIds: cpus.ToSliceNoSortInt64(),
Devices: devs,
CpuIds: cpus,
},
},
},
@ -162,52 +154,51 @@ func TestListPodResourcesV1(t *testing.T) {
}
func TestAllocatableResources(t *testing.T) {
allDevs := devicemanager.ResourceDeviceInstances{
"resource": {
"dev0": {
ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e",
Health: "Healthy",
Topology: &pluginapi.TopologyInfo{
Nodes: []*pluginapi.NUMANode{
{
ID: 0,
},
allDevs := []*podresourcesapi.ContainerDevices{
{
ResourceName: "resource",
DeviceIds: []string{"dev0"},
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 0,
},
},
},
"dev1": {
ID: "VF-8536e1e8-9dc6-4645-9aea-882db92e31e7",
Health: "Healthy",
Topology: &pluginapi.TopologyInfo{
Nodes: []*pluginapi.NUMANode{
{
ID: 1,
},
},
{
ResourceName: "resource",
DeviceIds: []string{"dev1"},
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: 1,
},
},
},
},
}
allCPUs := cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
allCPUs := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
for _, tc := range []struct {
desc string
allCPUs cpuset.CPUSet
allDevices devicemanager.ResourceDeviceInstances
allCPUs []int64
allDevices []*podresourcesapi.ContainerDevices
expectedAllocatableResourcesResponse *podresourcesapi.AllocatableResourcesResponse
}{
{
desc: "no devices, no CPUs",
allCPUs: cpuset.CPUSet{},
allDevices: devicemanager.NewResourceDeviceInstances(),
allCPUs: []int64{},
allDevices: []*podresourcesapi.ContainerDevices{},
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{},
},
{
desc: "no devices, all CPUs",
allCPUs: allCPUs,
allDevices: devicemanager.NewResourceDeviceInstances(),
allDevices: []*podresourcesapi.ContainerDevices{},
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
CpuIds: allCPUs.ToSliceNoSortInt64(),
CpuIds: allCPUs,
},
},
{
@ -215,7 +206,7 @@ func TestAllocatableResources(t *testing.T) {
allCPUs: allCPUs,
allDevices: allDevs,
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
CpuIds: allCPUs.ToSliceNoSortInt64(),
CpuIds: allCPUs,
Devices: []*podresourcesapi.ContainerDevices{
{
ResourceName: "resource",
@ -244,7 +235,7 @@ func TestAllocatableResources(t *testing.T) {
},
{
desc: "with devices, no CPUs",
allCPUs: cpuset.CPUSet{},
allCPUs: []int64{},
allDevices: allDevs,
expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
Devices: []*podresourcesapi.ContainerDevices{
@ -277,7 +268,7 @@ func TestAllocatableResources(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
m := new(mockProvider)
m.On("GetDevices", "", "").Return([]*podresourcesapi.ContainerDevices{})
m.On("GetCPUs", "", "").Return(cpuset.CPUSet{})
m.On("GetCPUs", "", "").Return([]int64{})
m.On("UpdateAllocatedDevices").Return()
m.On("GetAllocatableDevices").Return(tc.allDevices)
m.On("GetAllocatableCPUs").Return(tc.allCPUs)
@ -335,10 +326,17 @@ func equalContainerDevices(devA, devB []*podresourcesapi.ContainerDevices) bool
return false
}
// the ordering of container devices in the response is not defined,
// so we need to do a full scan, failing at first mismatch
for idx := 0; idx < len(devA); idx++ {
if !containsContainerDevice(devA[idx], devB) {
cntDevA := devA[idx]
cntDevB := devB[idx]
if cntDevA.ResourceName != cntDevB.ResourceName {
return false
}
if !equalTopology(cntDevA.Topology, cntDevB.Topology) {
return false
}
if !equalStrings(cntDevA.DeviceIds, cntDevB.DeviceIds) {
return false
}
}
@ -346,28 +344,6 @@ func equalContainerDevices(devA, devB []*podresourcesapi.ContainerDevices) bool
return true
}
func containsContainerDevice(cntDev *podresourcesapi.ContainerDevices, devs []*podresourcesapi.ContainerDevices) bool {
for idx := 0; idx < len(devs); idx++ {
if equalContainerDevice(cntDev, devs[idx]) {
return true
}
}
return false
}
func equalContainerDevice(cntDevA, cntDevB *podresourcesapi.ContainerDevices) bool {
if cntDevA.ResourceName != cntDevB.ResourceName {
return false
}
if !equalTopology(cntDevA.Topology, cntDevB.Topology) {
return false
}
if !equalStrings(cntDevA.DeviceIds, cntDevB.DeviceIds) {
return false
}
return true
}
func equalInt64s(a, b []int64) bool {
if len(a) != len(b) {
return false

View File

@ -68,10 +68,9 @@ func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.Lis
}
for j, container := range pod.Spec.Containers {
v1devices := containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name))
pRes.Containers[j] = &v1alpha1.ContainerResources{
Name: container.Name,
Devices: v1DevicesToAlphaV1(v1devices),
Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
}
}
podResources[i] = &pRes

View File

@ -25,10 +25,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
)
type mockProvider struct {
@ -40,28 +38,28 @@ func (m *mockProvider) GetPods() []*v1.Pod {
return args.Get(0).([]*v1.Pod)
}
func (m *mockProvider) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
func (m *mockProvider) GetDevices(podUID, containerName string) []*podresourcesv1.ContainerDevices {
args := m.Called(podUID, containerName)
return args.Get(0).(devicemanager.ResourceDeviceInstances)
return args.Get(0).([]*podresourcesv1.ContainerDevices)
}
func (m *mockProvider) GetCPUs(podUID, containerName string) cpuset.CPUSet {
func (m *mockProvider) GetCPUs(podUID, containerName string) []int64 {
args := m.Called(podUID, containerName)
return args.Get(0).(cpuset.CPUSet)
return args.Get(0).([]int64)
}
func (m *mockProvider) UpdateAllocatedDevices() {
m.Called()
}
func (m *mockProvider) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
func (m *mockProvider) GetAllocatableDevices() []*podresourcesv1.ContainerDevices {
args := m.Called()
return args.Get(0).(devicemanager.ResourceDeviceInstances)
return args.Get(0).([]*podresourcesv1.ContainerDevices)
}
func (m *mockProvider) GetAllocatableCPUs() cpuset.CPUSet {
func (m *mockProvider) GetAllocatableCPUs() []int64 {
args := m.Called()
return args.Get(0).(cpuset.CPUSet)
return args.Get(0).([]int64)
}
func TestListPodResourcesV1alpha1(t *testing.T) {
@ -70,23 +68,23 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
podUID := types.UID("pod-uid")
containerName := "container-name"
devs := devicemanager.ResourceDeviceInstances{
"resource": devicemanager.DeviceInstances{
"dev0": pluginapi.Device{},
"dev1": pluginapi.Device{},
devs := []*podresourcesv1.ContainerDevices{
{
ResourceName: "resource",
DeviceIds: []string{"dev0", "dev1"},
},
}
for _, tc := range []struct {
desc string
pods []*v1.Pod
devices devicemanager.ResourceDeviceInstances
devices []*podresourcesv1.ContainerDevices
expectedResponse *v1alpha1.ListPodResourcesResponse
}{
{
desc: "no pods",
pods: []*v1.Pod{},
devices: devicemanager.NewResourceDeviceInstances(),
devices: []*podresourcesv1.ContainerDevices{},
expectedResponse: &v1alpha1.ListPodResourcesResponse{},
},
{
@ -107,7 +105,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
},
},
},
devices: devicemanager.NewResourceDeviceInstances(),
devices: []*podresourcesv1.ContainerDevices{},
expectedResponse: &v1alpha1.ListPodResourcesResponse{
PodResources: []*v1alpha1.PodResources{
{
@ -150,7 +148,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
Containers: []*v1alpha1.ContainerResources{
{
Name: containerName,
Devices: v1DevicesToAlphaV1(containerDevicesFromResourceDeviceInstances(devs)),
Devices: v1DevicesToAlphaV1(devs),
},
},
},

View File

@ -18,8 +18,7 @@ package podresources
import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// DevicesProvider knows how to provide the devices used by the given container
@ -27,9 +26,9 @@ type DevicesProvider interface {
// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
UpdateAllocatedDevices()
// GetDevices returns information about the devices assigned to pods and containers
GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances
GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
// GetAllocatableDevices returns information about all the devices known to the manager
GetAllocatableDevices() devicemanager.ResourceDeviceInstances
GetAllocatableDevices() []*podresourcesapi.ContainerDevices
}
// PodsProvider knows how to provide the pods admitted by the node
@ -40,7 +39,7 @@ type PodsProvider interface {
// CPUsProvider knows how to provide the cpus used by the given container
type CPUsProvider interface {
// GetCPUs returns information about the cpus assigned to pods and containers
GetCPUs(podUID, containerName string) cpuset.CPUSet
GetCPUs(podUID, containerName string) []int64
// GetAllocatableCPUs returns the allocatable (not allocated) CPUs
GetAllocatableCPUs() cpuset.CPUSet
GetAllocatableCPUs() []int64
}

View File

@ -26,9 +26,11 @@ import (
// TODO: Migrate kubelet to either use its own internal objects or client library.
v1 "k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@ -186,3 +188,28 @@ func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) {
}
return &reservations, nil
}
func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*podresourcesapi.ContainerDevices {
var respDevs []*podresourcesapi.ContainerDevices
for resourceName, resourceDevs := range devs {
for devID, dev := range resourceDevs {
for _, node := range dev.GetTopology().GetNodes() {
numaNode := node.GetID()
respDevs = append(respDevs, &podresourcesapi.ContainerDevices{
ResourceName: resourceName,
DeviceIds: []string{devID},
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: numaNode,
},
},
},
})
}
}
}
return respDevs
}

View File

@ -47,11 +47,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@ -1068,20 +1068,20 @@ func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceLi
return cm.deviceManager.GetCapacity()
}
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
return cm.deviceManager.GetDevices(podUID, containerName)
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetDevices(podUID, containerName))
}
func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
return cm.deviceManager.GetAllocatableDevices()
func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetAllocatableDevices())
}
func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) cpuset.CPUSet {
return cm.cpuManager.GetCPUs(podUID, containerName).Clone()
func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
return cm.cpuManager.GetCPUs(podUID, containerName).ToSliceNoSortInt64()
}
func (cm *containerManagerImpl) GetAllocatableCPUs() cpuset.CPUSet {
return cm.cpuManager.GetAllocatableCPUs()
func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
return cm.cpuManager.GetAllocatableCPUs().ToSliceNoSortInt64()
}
func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {

View File

@ -22,9 +22,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -111,11 +110,11 @@ func (cm *containerManagerStub) GetPodCgroupRoot() string {
return ""
}
func (cm *containerManagerStub) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
return nil
}
func (cm *containerManagerStub) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
func (cm *containerManagerStub) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
return nil
}
@ -131,12 +130,12 @@ func (cm *containerManagerStub) UpdateAllocatedDevices() {
return
}
func (cm *containerManagerStub) GetCPUs(_, _ string) cpuset.CPUSet {
return cpuset.CPUSet{}
func (cm *containerManagerStub) GetCPUs(_, _ string) []int64 {
return nil
}
func (cm *containerManagerStub) GetAllocatableCPUs() cpuset.CPUSet {
return cpuset.CPUSet{}
func (cm *containerManagerStub) GetAllocatableCPUs() []int64 {
return nil
}
func NewStubContainerManager() ContainerManager {

View File

@ -32,10 +32,10 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@ -216,11 +216,11 @@ func (cm *containerManagerImpl) GetPodCgroupRoot() string {
return ""
}
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
return cm.deviceManager.GetDevices(podUID, containerName)
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetDevices(podUID, containerName))
}
func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
return nil
}
@ -236,10 +236,10 @@ func (cm *containerManagerImpl) UpdateAllocatedDevices() {
return
}
func (cm *containerManagerImpl) GetCPUs(_, _ string) cpuset.CPUSet {
return cpuset.CPUSet{}
func (cm *containerManagerImpl) GetCPUs(_, _ string) []int64 {
return nil
}
func (cm *containerManagerImpl) GetAllocatableCPUs() cpuset.CPUSet {
return cpuset.CPUSet{}
func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
return nil
}

View File

@ -23,9 +23,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -168,14 +167,14 @@ func (cm *FakeContainerManager) GetPodCgroupRoot() string {
return ""
}
func (cm *FakeContainerManager) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
func (cm *FakeContainerManager) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetDevices")
return nil
}
func (cm *FakeContainerManager) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
func (cm *FakeContainerManager) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
@ -203,16 +202,15 @@ func (cm *FakeContainerManager) UpdateAllocatedDevices() {
return
}
func (cm *FakeContainerManager) GetCPUs(_, _ string) cpuset.CPUSet {
func (cm *FakeContainerManager) GetCPUs(_, _ string) []int64 {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetCPUs")
return cpuset.CPUSet{}
return nil
}
func (cm *FakeContainerManager) GetAllocatableCPUs() cpuset.CPUSet {
func (cm *FakeContainerManager) GetAllocatableCPUs() []int64 {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableCPUs")
return cpuset.CPUSet{}
return nil
}