node: podresources: make GetDevices() consistent

We want to make the return type of the GetDevices() method of the
podresources DevicesProvider interface consistent with that of the
newly added GetAllocatableDevices() method.
This makes the code easier to read and reduces the coupling between
the podresources API server and the devicemanager code.

No changes in behaviour are intended, but the different return type
now requires some data massaging. Tests are updated accordingly.

Signed-off-by: Francesco Romani <fromani@redhat.com>
Author: Francesco Romani <fromani@redhat.com>
Date:   2021-03-05 17:20:10 +01:00
Parent: 6d33354e4c
Commit: ad68f9588c
16 changed files with 123 additions and 95 deletions
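
At a glance, the shape of the change — a minimal compiling sketch with stand-in types (the real code uses podresourcesapi.ContainerDevices, pluginapi.Device and the devicemanager map types shown in the hunks below):

package podresourcessketch

// Stand-ins for podresourcesapi.ContainerDevices and
// devicemanager.ResourceDeviceInstances, to keep the sketch self-contained.
type ContainerDevices struct{}
type ResourceDeviceInstances map[string]map[string]struct{}

// Before this commit: GetDevices() leaked the podresources gRPC slice type.
type DevicesProviderBefore interface {
	GetDevices(podUID, containerName string) []*ContainerDevices
	GetAllocatableDevices() ResourceDeviceInstances
}

// After: both accessors return the device manager's native map type;
// the podresources server converts to the gRPC shape at the API edge.
type DevicesProviderAfter interface {
	GetDevices(podUID, containerName string) ResourceDeviceInstances
	GetAllocatableDevices() ResourceDeviceInstances
}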


@@ -19,6 +19,7 @@ package podresources
 import (
 	"context"
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubelet/pkg/apis/podresources/v1"
@@ -59,7 +60,7 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
 		for j, container := range pod.Spec.Containers {
 			pRes.Containers[j] = &v1.ContainerResources{
 				Name:    container.Name,
-				Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
+				Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
 				CpuIds:  p.cpusProvider.GetCPUs(string(pod.UID), container.Name).ToSliceNoSortInt64(),
 			}
 		}
@@ -75,10 +76,16 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
 func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *v1.AllocatableResourcesRequest) (*v1.AllocatableResourcesResponse, error) {
 	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
-	allDevices := p.devicesProvider.GetAllocatableDevices()
+	return &v1.AllocatableResourcesResponse{
+		Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetAllocatableDevices()),
+		CpuIds:  p.cpusProvider.GetAllocatableCPUs().ToSliceNoSortInt64(),
+	}, nil
+}

+func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*v1.ContainerDevices {
 	var respDevs []*v1.ContainerDevices
-	for resourceName, resourceDevs := range allDevices {
+	for resourceName, resourceDevs := range devs {
 		for devID, dev := range resourceDevs {
 			for _, node := range dev.GetTopology().GetNodes() {
 				numaNode := node.GetID()
@@ -87,7 +94,9 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
 					DeviceIds: []string{devID},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							{ID: numaNode},
+							{
+								ID: numaNode,
+							},
 						},
 					},
 				})
@@ -95,8 +104,5 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
 			}
 		}
 	}
-	return &v1.AllocatableResourcesResponse{
-		Devices: respDevs,
-		CpuIds:  p.cpusProvider.GetAllocatableCPUs().ToSliceNoSortInt64(),
-	}, nil
+	return respDevs
 }
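
A concrete feel for the new helper, as a self-contained toy with stand-in types (hypothetical names; the real function consumes devicemanager.ResourceDeviceInstances and emits []*v1.ContainerDevices): a device whose topology spans two NUMA nodes flattens into one entry per (device, NUMA node) pair, preserving the per-NUMA granularity of the old inline loop.

package main

import "fmt"

// Stand-ins for pluginapi.Device and v1.ContainerDevices; this toy
// mirrors only the shape of the conversion, not the Kubernetes types.
type device struct{ numaNodes []int64 }
type resourceDeviceInstances map[string]map[string]device

type containerDevices struct {
	ResourceName string
	DeviceIds    []string
	NUMANode     int64
}

// flatten mirrors containerDevicesFromResourceDeviceInstances: one
// entry per (device, NUMA node) pair.
func flatten(devs resourceDeviceInstances) []containerDevices {
	var out []containerDevices
	for resourceName, resourceDevs := range devs {
		for devID, dev := range resourceDevs {
			for _, node := range dev.numaNodes {
				out = append(out, containerDevices{
					ResourceName: resourceName,
					DeviceIds:    []string{devID},
					NUMANode:     node,
				})
			}
		}
	}
	return out
}

func main() {
	devs := resourceDeviceInstances{
		"vendor.com/gpu": {
			"gpu-0": device{numaNodes: []int64{0, 1}}, // spans two NUMA nodes
		},
	}
	// A device attached to two NUMA nodes yields two entries:
	// [{vendor.com/gpu [gpu-0] 0} {vendor.com/gpu [gpu-0] 1}]
	fmt.Println(flatten(devs))
}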


@@ -38,11 +38,18 @@ func TestListPodResourcesV1(t *testing.T) {
 	containerName := "container-name"
 	numaID := int64(1)
-	devs := []*podresourcesapi.ContainerDevices{
-		{
-			ResourceName: "resource",
-			DeviceIds:    []string{"dev0", "dev1"},
-			Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
+	devs := devicemanager.ResourceDeviceInstances{
+		"resource": devicemanager.DeviceInstances{
+			"dev0": pluginapi.Device{
+				Topology: &pluginapi.TopologyInfo{
+					Nodes: []*pluginapi.NUMANode{{ID: numaID}},
+				},
+			},
+			"dev1": pluginapi.Device{
+				Topology: &pluginapi.TopologyInfo{
+					Nodes: []*pluginapi.NUMANode{{ID: numaID}},
+				},
+			},
 		},
 	}
@@ -51,14 +58,14 @@ func TestListPodResourcesV1(t *testing.T) {
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*podresourcesapi.ContainerDevices
+		devices          devicemanager.ResourceDeviceInstances
 		cpus             cpuset.CPUSet
 		expectedResponse *podresourcesapi.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*podresourcesapi.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			cpus:             cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
 		},
@@ -80,7 +87,7 @@ func TestListPodResourcesV1(t *testing.T) {
 					},
 				},
 			},
-			devices:          []*podresourcesapi.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			cpus:             cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
@@ -125,7 +132,7 @@ func TestListPodResourcesV1(t *testing.T) {
 					Containers: []*podresourcesapi.ContainerResources{
 						{
 							Name:    containerName,
-							Devices: devs,
+							Devices: containerDevicesFromResourceDeviceInstances(devs),
 							CpuIds:  cpus.ToSliceNoSortInt64(),
 						},
 					},


@@ -68,9 +68,10 @@ func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.Lis
 		}
 		for j, container := range pod.Spec.Containers {
+			v1devices := containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name))
 			pRes.Containers[j] = &v1alpha1.ContainerResources{
 				Name:    container.Name,
-				Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
+				Devices: v1DevicesToAlphaV1(v1devices),
 			}
 		}
 		podResources[i] = &pRes
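
So the v1alpha1 server now converts in two hops: device-manager map to v1 ContainerDevices (the shared helper above), then v1 to v1alpha1. A hedged sketch of the second hop with stand-in types — to my understanding the real v1DevicesToAlphaV1 copies ResourceName and DeviceIds and drops the v1-only Topology field:

package podresourcessketch

// Stand-ins for the two gRPC generations of ContainerDevices.
type v1ContainerDevices struct {
	ResourceName string
	DeviceIds    []string
}
type v1alpha1ContainerDevices struct {
	ResourceName string
	DeviceIds    []string
}

// v1DevicesToAlphaV1Sketch mirrors the pre-existing converter: a plain
// field-by-field copy into the older message shape.
func v1DevicesToAlphaV1Sketch(in []v1ContainerDevices) []v1alpha1ContainerDevices {
	out := make([]v1alpha1ContainerDevices, 0, len(in))
	for _, d := range in {
		out = append(out, v1alpha1ContainerDevices{
			ResourceName: d.ResourceName,
			DeviceIds:    d.DeviceIds,
		})
	}
	return out
}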


@@ -25,7 +25,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
+	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
 	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -40,9 +40,9 @@ func (m *mockProvider) GetPods() []*v1.Pod {
 	return args.Get(0).([]*v1.Pod)
 }
-func (m *mockProvider) GetDevices(podUID, containerName string) []*podresourcesv1.ContainerDevices {
+func (m *mockProvider) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	args := m.Called(podUID, containerName)
-	return args.Get(0).([]*podresourcesv1.ContainerDevices)
+	return args.Get(0).(devicemanager.ResourceDeviceInstances)
 }
 func (m *mockProvider) GetCPUs(podUID, containerName string) cpuset.CPUSet {
@@ -70,23 +70,23 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 	podUID := types.UID("pod-uid")
 	containerName := "container-name"
-	devs := []*podresourcesv1.ContainerDevices{
-		{
-			ResourceName: "resource",
-			DeviceIds:    []string{"dev0", "dev1"},
+	devs := devicemanager.ResourceDeviceInstances{
+		"resource": devicemanager.DeviceInstances{
+			"dev0": pluginapi.Device{},
+			"dev1": pluginapi.Device{},
 		},
 	}
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*podresourcesv1.ContainerDevices
+		devices          devicemanager.ResourceDeviceInstances
 		expectedResponse *v1alpha1.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*podresourcesv1.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{},
 		},
 		{
@@ -107,7 +107,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 					},
 				},
 			},
-			devices:          []*podresourcesv1.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{
 				PodResources: []*v1alpha1.PodResources{
 					{
@@ -150,7 +150,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 					Containers: []*v1alpha1.ContainerResources{
 						{
 							Name:    containerName,
-							Devices: v1DevicesToAlphaV1(devs),
+							Devices: v1DevicesToAlphaV1(containerDevicesFromResourceDeviceInstances(devs)),
 						},
 					},
 				},


@@ -18,7 +18,6 @@ package podresources
 import (
 	"k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 )
@@ -28,7 +27,7 @@ type DevicesProvider interface {
 	// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 	UpdateAllocatedDevices()
 	// GetDevices returns information about the devices assigned to pods and containers
-	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
+	GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances
 	// GetAllocatableDevices returns information about all the devices known to the manager
 	GetAllocatableDevices() devicemanager.ResourceDeviceInstances
 }
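
With the interface no longer tied to the gRPC type, a test double can hand back canned maps directly. A minimal hand-rolled sketch (hypothetical names, reusing the stand-in types from the earlier sketch), analogous in spirit to the testify mockProvider updated elsewhere in this commit:

package podresourcessketch

// fakeDevicesProvider is a minimal double for the updated interface:
// canned data, no mocking library.
type fakeDevicesProvider struct {
	perContainer map[string]ResourceDeviceInstances // key: podUID+"/"+containerName
	allocatable  ResourceDeviceInstances
}

func (f *fakeDevicesProvider) GetDevices(podUID, containerName string) ResourceDeviceInstances {
	return f.perContainer[podUID+"/"+containerName]
}

func (f *fakeDevicesProvider) GetAllocatableDevices() ResourceDeviceInstances {
	return f.allocatable
}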


@@ -47,7 +47,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
@@ -1069,7 +1068,7 @@ func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceLi
 	return cm.deviceManager.GetCapacity()
 }
-func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }


@@ -22,7 +22,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -112,7 +111,11 @@ func (cm *containerManagerStub) GetPodCgroupRoot() string {
 	return ""
 }
-func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerStub) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
+	return nil
+}
+
+func (cm *containerManagerStub) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
 	return nil
 }
@@ -132,10 +135,6 @@ func (cm *containerManagerStub) GetCPUs(_, _ string) cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
-func (cm *containerManagerStub) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	return nil
-}
-
 func (cm *containerManagerStub) GetAllocatableCPUs() cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }


@@ -32,7 +32,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
@@ -217,10 +216,14 @@ func (cm *containerManagerImpl) GetPodCgroupRoot() string {
 	return ""
 }
-func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }

+func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
+	return nil
+}
+
 func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 	return cm.deviceManager.ShouldResetExtendedResourceCapacity()
 }
@@ -240,7 +243,3 @@ func (cm *containerManagerImpl) GetCPUs(_, _ string) cpuset.CPUSet {
 func (cm *containerManagerImpl) GetAllocatableCPUs() cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
-
-func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	return nil
-}


@@ -37,7 +37,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
@@ -152,7 +151,7 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi
 		socketname:       file,
 		socketdir:        dir,
-		allDevices:       make(map[string]map[string]pluginapi.Device),
+		allDevices:       NewResourceDeviceInstances(),
 		healthyDevices:   make(map[string]sets.String),
 		unhealthyDevices: make(map[string]sets.String),
 		allocatedDevices: make(map[string]sets.String),
@@ -1078,7 +1077,7 @@ func (m *ManagerImpl) GetAllocatableDevices() ResourceDeviceInstances {
 }

 // GetDevices returns the devices used by the specified container
-func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (m *ManagerImpl) GetDevices(podUID, containerName string) ResourceDeviceInstances {
 	return m.podDevices.getContainerDevices(podUID, containerName)
 }


@@ -18,7 +18,6 @@ package devicemanager
 import (
 	v1 "k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -80,7 +79,12 @@ func (h *ManagerStub) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
 }

 // GetDevices returns nil
-func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (h *ManagerStub) GetDevices(_, _ string) ResourceDeviceInstances {
+	return nil
+}
+
+// GetAllocatableDevices returns nothing
+func (h *ManagerStub) GetAllocatableDevices() ResourceDeviceInstances {
 	return nil
 }
@@ -93,8 +97,3 @@ func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool {
 func (h *ManagerStub) UpdateAllocatedDevices() {
 	return
 }
-
-// GetAllocatableDevices returns nothing
-func (h *ManagerStub) GetAllocatableDevices() ResourceDeviceInstances {
-	return nil
-}


@@ -622,7 +622,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 		activePods:        activePods,
 		sourcesReady:      &sourcesReadyStub{},
 		checkpointManager: ckm,
-		allDevices:        make(map[string]map[string]pluginapi.Device),
+		allDevices:        NewResourceDeviceInstances(),
 	}
 	for _, res := range testRes {


@@ -23,7 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
@@ -324,7 +323,7 @@ func (pdev *podDevices) deviceRunContainerOptions(podUID, contName string) *Devi
 }

 // getContainerDevices returns the devices assigned to the provided container for all ResourceNames
-func (pdev *podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices {
+func (pdev *podDevices) getContainerDevices(podUID, contName string) ResourceDeviceInstances {
 	pdev.RLock()
 	defer pdev.RUnlock()
@@ -334,21 +333,39 @@ func (pdev *podDevices) getContainerDevices(podUID, contName string) []*podresou
 	if _, contExists := pdev.devs[podUID][contName]; !contExists {
 		return nil
 	}
-	cDev := []*podresourcesapi.ContainerDevices{}
+	resDev := NewResourceDeviceInstances()
 	for resource, allocateInfo := range pdev.devs[podUID][contName] {
-		for numaid, devlist := range allocateInfo.deviceIds {
-			cDev = append(cDev, &podresourcesapi.ContainerDevices{
-				ResourceName: resource,
-				DeviceIds:    devlist,
-				Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaid}}},
-			})
+		if len(allocateInfo.deviceIds) == 0 {
+			continue
 		}
+		devicePluginMap := make(map[string]pluginapi.Device)
+		for numaid, devlist := range allocateInfo.deviceIds {
+			for _, devId := range devlist {
+				NUMANodes := []*pluginapi.NUMANode{{ID: numaid}}
+				if pDev, ok := devicePluginMap[devId]; ok && pDev.Topology != nil {
+					if nodes := pDev.Topology.GetNodes(); nodes != nil {
+						NUMANodes = append(NUMANodes, nodes...)
+					}
+				}
+				devicePluginMap[devId] = pluginapi.Device{
+					// ID and Healthy are not relevant here.
+					Topology: &pluginapi.TopologyInfo{
+						Nodes: NUMANodes,
+					},
+				}
+			}
+		}
+		resDev[resource] = devicePluginMap
 	}
-	return cDev
+	return resDev
 }

-// ResourceDeviceInstances is a mapping resource name -> device name -> device data
-type ResourceDeviceInstances map[string]map[string]pluginapi.Device
+// DeviceInstances is a mapping device name -> plugin device data
+type DeviceInstances map[string]pluginapi.Device
+
+// ResourceDeviceInstances is a mapping resource name -> DeviceInstances
+type ResourceDeviceInstances map[string]DeviceInstances

 func NewResourceDeviceInstances() ResourceDeviceInstances {
 	return make(ResourceDeviceInstances)
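
The subtle part of the new getContainerDevices is the inversion: the per-container checkpoint stores device IDs keyed by NUMA node, so a device recorded under two NUMA ids must come back as a single Device whose Topology carries both nodes. A self-contained toy of that merge (stand-in types, same loop structure as above):

package main

import "fmt"

// Stand-in for pluginapi.Device: only the topology matters here.
type device struct{ numaNodes []int64 }

// mergeByDevice mirrors the inner loop of getContainerDevices:
// allocations arrive keyed by NUMA id (numaid -> device IDs), and we
// invert that into device ID -> one device carrying all its NUMA nodes.
func mergeByDevice(deviceIDs map[int64][]string) map[string]device {
	merged := make(map[string]device)
	for numaid, devlist := range deviceIDs {
		for _, devID := range devlist {
			nodes := []int64{numaid}
			if prev, ok := merged[devID]; ok {
				nodes = append(nodes, prev.numaNodes...)
			}
			merged[devID] = device{numaNodes: nodes}
		}
	}
	return merged
}

func main() {
	// "dev0" was recorded under NUMA node 0 and NUMA node 1.
	alloc := map[int64][]string{
		0: {"dev0"},
		1: {"dev0", "dev1"},
	}
	// dev0 ends up with both nodes, dev1 with just node 1, e.g.:
	// map[dev0:{[1 0]} dev1:{[1]}]  (map iteration order varies)
	fmt.Println(mergeByDevice(alloc))
}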


@@ -35,13 +35,18 @@ func TestGetContainerDevices(t *testing.T) {
 		devices,
 		constructAllocResp(map[string]string{"/dev/r1dev1": "/dev/r1dev1", "/dev/r1dev2": "/dev/r1dev2"}, map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))

-	contDevices := podDevices.getContainerDevices(podID, contID)
-	require.Equal(t, len(devices), len(contDevices), "Incorrect container devices")
-	for _, contDev := range contDevices {
-		for _, node := range contDev.Topology.Nodes {
+	resContDevices := podDevices.getContainerDevices(podID, contID)
+	contDevices, ok := resContDevices[resourceName1]
+	require.True(t, ok, "resource %q not present", resourceName1)
+
+	for devId, plugInfo := range contDevices {
+		nodes := plugInfo.GetTopology().GetNodes()
+		require.Equal(t, len(nodes), len(devices), "Incorrect container devices: %v - %v (nodes %v)", devices, contDevices, nodes)
+
+		for _, node := range plugInfo.GetTopology().GetNodes() {
 			dev, ok := devices[node.ID]
 			require.True(t, ok, "NUMA id %v doesn't exist in result", node.ID)
-			require.Equal(t, contDev.DeviceIds[0], dev[0], "Can't find device %s in result", dev[0])
+			require.Equal(t, devId, dev[0], "Can't find device %s in result", dev[0])
 		}
 	}
 }


@@ -56,7 +56,7 @@ func TestGetTopologyHints(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			podDevices:       newPodDevices(),
@@ -66,7 +66,7 @@ func TestGetTopologyHints(t *testing.T) {
 		}
 		for r := range tc.devices {
-			m.allDevices[r] = make(map[string]pluginapi.Device)
+			m.allDevices[r] = make(DeviceInstances)
 			m.healthyDevices[r] = sets.NewString()
 			for _, d := range tc.devices[r] {
@@ -409,7 +409,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 	}
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			endpoints:        make(map[string]endpointInfo),
@@ -419,7 +419,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 			topologyAffinityStore: &mockAffinityStore{tc.hint},
 		}
-		m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
+		m.allDevices[tc.resource] = make(DeviceInstances)
 		m.healthyDevices[tc.resource] = sets.NewString()
 		m.endpoints[tc.resource] = endpointInfo{}
@@ -598,7 +598,7 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 	}
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			endpoints:        make(map[string]endpointInfo),
@@ -608,7 +608,7 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 			topologyAffinityStore: &mockAffinityStore{tc.hint},
 		}
-		m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
+		m.allDevices[tc.resource] = make(DeviceInstances)
 		m.healthyDevices[tc.resource] = sets.NewString()
 		for _, d := range tc.allDevices {
 			m.allDevices[tc.resource][d.ID] = d
@@ -920,7 +920,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			podDevices:       newPodDevices(),
@@ -930,7 +930,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 		}
 		for r := range tc.devices {
-			m.allDevices[r] = make(map[string]pluginapi.Device)
+			m.allDevices[r] = make(DeviceInstances)
 			m.healthyDevices[r] = sets.NewString()
 			for _, d := range tc.devices[r] {


@@ -20,7 +20,6 @@ import (
 	"time"
 	v1 "k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -60,7 +59,10 @@ type Manager interface {
 	GetWatcherHandler() cache.PluginHandler

 	// GetDevices returns information about the devices assigned to pods and containers
-	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
+	GetDevices(podUID, containerName string) ResourceDeviceInstances
+
+	// GetAllocatableDevices returns information about all the devices known to the manager
+	GetAllocatableDevices() ResourceDeviceInstances

 	// ShouldResetExtendedResourceCapacity returns whether the extended resources should be reset or not,
 	// depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates
@@ -77,9 +79,6 @@ type Manager interface {
 	// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 	UpdateAllocatedDevices()
-
-	// GetAllocatableDevices returns information about all the devices known to the manager
-	GetAllocatableDevices() ResourceDeviceInstances
 }

 // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.


@@ -23,7 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -169,13 +168,20 @@ func (cm *FakeContainerManager) GetPodCgroupRoot() string {
 	return ""
 }

-func (cm *FakeContainerManager) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (cm *FakeContainerManager) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
 	cm.Lock()
 	defer cm.Unlock()
 	cm.CalledFunctions = append(cm.CalledFunctions, "GetDevices")
 	return nil
 }

+func (cm *FakeContainerManager) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
+	cm.Lock()
+	defer cm.Unlock()
+	cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
+	return nil
+}
+
 func (cm *FakeContainerManager) ShouldResetExtendedResourceCapacity() bool {
 	cm.Lock()
 	defer cm.Unlock()
@@ -204,13 +210,6 @@ func (cm *FakeContainerManager) GetCPUs(_, _ string) cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }

-func (cm *FakeContainerManager) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	cm.Lock()
-	defer cm.Unlock()
-	cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
-	return nil
-}
-
 func (cm *FakeContainerManager) GetAllocatableCPUs() cpuset.CPUSet {
 	cm.Lock()
 	defer cm.Unlock()