node: podresources: make GetDevices() consistent
We want to make the return type of the GetDevices() method of the podresources DevicesProvider interface consistent with the newly added GetAllocatableDevices() method. This makes the code easier to read and reduces the coupling between the podresources API server and the devicemanager code. No intended changes in behaviour, but the different return types now require some data massaging. Tests are updated accordingly.

Signed-off-by: Francesco Romani <fromani@redhat.com>
parent 6d33354e4c
commit ad68f9588c
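For readers skimming the diff, here is a minimal, self-contained Go sketch of the data flow this commit settles on: the DevicesProvider now returns the device manager's native ResourceDeviceInstances map from both GetDevices() and GetAllocatableDevices(), and the podresources server flattens that map into the v1 ContainerDevices list. The types below are simplified stand-ins for the real kubelet ones (pluginapi.Device, the podresources v1 API), and the resource name is hypothetical; this is an illustration of the shape of the change, not the actual packages.

package main

import "fmt"

// Simplified stand-ins for the kubelet types; field names follow the diff
// but this is not the real API surface.
type NUMANode struct{ ID int64 }

type TopologyInfo struct{ Nodes []*NUMANode }

type Device struct{ Topology *TopologyInfo }

type ContainerDevices struct {
	ResourceName string
	DeviceIds    []string
	Topology     *TopologyInfo
}

// DeviceInstances maps device ID -> device data.
type DeviceInstances map[string]Device

// ResourceDeviceInstances maps resource name -> DeviceInstances; this is the
// shape the DevicesProvider now returns from both GetDevices() and
// GetAllocatableDevices().
type ResourceDeviceInstances map[string]DeviceInstances

// containerDevicesFromResourceDeviceInstances flattens the manager-side map
// into the per-device, per-NUMA-node view exposed by the podresources API,
// mirroring the helper the commit adds on the server side.
func containerDevicesFromResourceDeviceInstances(devs ResourceDeviceInstances) []*ContainerDevices {
	var respDevs []*ContainerDevices
	for resourceName, resourceDevs := range devs {
		for devID, dev := range resourceDevs {
			if dev.Topology == nil {
				continue // no topology information recorded for this device
			}
			for _, node := range dev.Topology.Nodes {
				respDevs = append(respDevs, &ContainerDevices{
					ResourceName: resourceName,
					DeviceIds:    []string{devID},
					Topology:     &TopologyInfo{Nodes: []*NUMANode{{ID: node.ID}}},
				})
			}
		}
	}
	return respDevs
}

func main() {
	// A hypothetical resource name, purely for illustration.
	devs := ResourceDeviceInstances{
		"vendor.example/gpu": DeviceInstances{
			"gpu-0": Device{Topology: &TopologyInfo{Nodes: []*NUMANode{{ID: 0}}}},
			"gpu-1": Device{Topology: &TopologyInfo{Nodes: []*NUMANode{{ID: 1}}}},
		},
	}
	for _, cd := range containerDevicesFromResourceDeviceInstances(devs) {
		fmt.Printf("%s %v NUMA %d\n", cd.ResourceName, cd.DeviceIds, cd.Topology.Nodes[0].ID)
	}
}

This is also why the tests below switch from building []*ContainerDevices literals to ResourceDeviceInstances maps and convert at the comparison site.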
@@ -19,6 +19,7 @@ package podresources
 import (
 	"context"
 
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 
 	"k8s.io/kubelet/pkg/apis/podresources/v1"
@@ -59,7 +60,7 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
 		for j, container := range pod.Spec.Containers {
 			pRes.Containers[j] = &v1.ContainerResources{
 				Name:    container.Name,
-				Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
+				Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
 				CpuIds:  p.cpusProvider.GetCPUs(string(pod.UID), container.Name).ToSliceNoSortInt64(),
 			}
 		}
@@ -75,10 +76,16 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
 func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *v1.AllocatableResourcesRequest) (*v1.AllocatableResourcesResponse, error) {
 	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
 
-	allDevices := p.devicesProvider.GetAllocatableDevices()
+	return &v1.AllocatableResourcesResponse{
+		Devices: containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetAllocatableDevices()),
+		CpuIds:  p.cpusProvider.GetAllocatableCPUs().ToSliceNoSortInt64(),
+	}, nil
+}
+
+func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*v1.ContainerDevices {
 	var respDevs []*v1.ContainerDevices
 
-	for resourceName, resourceDevs := range allDevices {
+	for resourceName, resourceDevs := range devs {
 		for devID, dev := range resourceDevs {
 			for _, node := range dev.GetTopology().GetNodes() {
 				numaNode := node.GetID()
@@ -87,7 +94,9 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
 					DeviceIds:    []string{devID},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							{ID: numaNode},
+							{
+								ID: numaNode,
+							},
 						},
 					},
 				})
@@ -95,8 +104,5 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
 		}
 	}
 
-	return &v1.AllocatableResourcesResponse{
-		Devices: respDevs,
-		CpuIds:  p.cpusProvider.GetAllocatableCPUs().ToSliceNoSortInt64(),
-	}, nil
+	return respDevs
 }
@@ -38,11 +38,18 @@ func TestListPodResourcesV1(t *testing.T) {
 	containerName := "container-name"
 	numaID := int64(1)
 
-	devs := []*podresourcesapi.ContainerDevices{
-		{
-			ResourceName: "resource",
-			DeviceIds:    []string{"dev0", "dev1"},
-			Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
+	devs := devicemanager.ResourceDeviceInstances{
+		"resource": devicemanager.DeviceInstances{
+			"dev0": pluginapi.Device{
+				Topology: &pluginapi.TopologyInfo{
+					Nodes: []*pluginapi.NUMANode{{ID: numaID}},
+				},
+			},
+			"dev1": pluginapi.Device{
+				Topology: &pluginapi.TopologyInfo{
+					Nodes: []*pluginapi.NUMANode{{ID: numaID}},
+				},
+			},
 		},
 	}
 
@@ -51,14 +58,14 @@ func TestListPodResourcesV1(t *testing.T) {
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*podresourcesapi.ContainerDevices
+		devices          devicemanager.ResourceDeviceInstances
 		cpus             cpuset.CPUSet
 		expectedResponse *podresourcesapi.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*podresourcesapi.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			cpus:             cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
 		},
@@ -80,7 +87,7 @@ func TestListPodResourcesV1(t *testing.T) {
 					},
 				},
 			},
-			devices:          []*podresourcesapi.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			cpus:             cpuset.CPUSet{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
@@ -125,7 +132,7 @@ func TestListPodResourcesV1(t *testing.T) {
 						Containers: []*podresourcesapi.ContainerResources{
 							{
 								Name:    containerName,
-								Devices: devs,
+								Devices: containerDevicesFromResourceDeviceInstances(devs),
 								CpuIds:  cpus.ToSliceNoSortInt64(),
 							},
 						},
@@ -68,9 +68,10 @@ func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.Lis
 		}
 
 		for j, container := range pod.Spec.Containers {
+			v1devices := containerDevicesFromResourceDeviceInstances(p.devicesProvider.GetDevices(string(pod.UID), container.Name))
 			pRes.Containers[j] = &v1alpha1.ContainerResources{
 				Name:    container.Name,
-				Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
+				Devices: v1DevicesToAlphaV1(v1devices),
 			}
 		}
 		podResources[i] = &pRes
@@ -25,7 +25,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
+	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
 	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -40,9 +40,9 @@ func (m *mockProvider) GetPods() []*v1.Pod {
 	return args.Get(0).([]*v1.Pod)
 }
 
-func (m *mockProvider) GetDevices(podUID, containerName string) []*podresourcesv1.ContainerDevices {
+func (m *mockProvider) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	args := m.Called(podUID, containerName)
-	return args.Get(0).([]*podresourcesv1.ContainerDevices)
+	return args.Get(0).(devicemanager.ResourceDeviceInstances)
 }
 
 func (m *mockProvider) GetCPUs(podUID, containerName string) cpuset.CPUSet {
@@ -70,23 +70,23 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 	podUID := types.UID("pod-uid")
 	containerName := "container-name"
 
-	devs := []*podresourcesv1.ContainerDevices{
-		{
-			ResourceName: "resource",
-			DeviceIds:    []string{"dev0", "dev1"},
+	devs := devicemanager.ResourceDeviceInstances{
+		"resource": devicemanager.DeviceInstances{
+			"dev0": pluginapi.Device{},
+			"dev1": pluginapi.Device{},
 		},
 	}
 
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*podresourcesv1.ContainerDevices
+		devices          devicemanager.ResourceDeviceInstances
 		expectedResponse *v1alpha1.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*podresourcesv1.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{},
 		},
 		{
@@ -107,7 +107,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 					},
 				},
 			},
-			devices:          []*podresourcesv1.ContainerDevices{},
+			devices:          devicemanager.NewResourceDeviceInstances(),
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{
 				PodResources: []*v1alpha1.PodResources{
 					{
@@ -150,7 +150,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 						Containers: []*v1alpha1.ContainerResources{
 							{
 								Name:    containerName,
-								Devices: v1DevicesToAlphaV1(devs),
+								Devices: v1DevicesToAlphaV1(containerDevicesFromResourceDeviceInstances(devs)),
 							},
 						},
 					},
@@ -18,7 +18,6 @@ package podresources
 
 import (
 	"k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 )
@@ -28,7 +27,7 @@ type DevicesProvider interface {
 	// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 	UpdateAllocatedDevices()
 	// GetDevices returns information about the devices assigned to pods and containers
-	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
+	GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances
 	// GetAllocatableDevices returns information about all the devices known to the manager
 	GetAllocatableDevices() devicemanager.ResourceDeviceInstances
 }
@@ -47,7 +47,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
@@ -1069,7 +1068,7 @@ func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceLi
 	return cm.deviceManager.GetCapacity()
 }
 
-func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }
 
@@ -22,7 +22,6 @@ import (
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -112,7 +111,11 @@ func (cm *containerManagerStub) GetPodCgroupRoot() string {
 	return ""
 }
 
-func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerStub) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
+	return nil
+}
+
+func (cm *containerManagerStub) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
 	return nil
 }
 
@@ -132,10 +135,6 @@ func (cm *containerManagerStub) GetCPUs(_, _ string) cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
 
-func (cm *containerManagerStub) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	return nil
-}
-
 func (cm *containerManagerStub) GetAllocatableCPUs() cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
@@ -32,7 +32,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
@@ -217,10 +216,14 @@ func (cm *containerManagerImpl) GetPodCgroupRoot() string {
 	return ""
 }
 
-func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (cm *containerManagerImpl) GetDevices(podUID, containerName string) devicemanager.ResourceDeviceInstances {
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }
 
+func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
+	return nil
+}
+
 func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 	return cm.deviceManager.ShouldResetExtendedResourceCapacity()
 }
@@ -240,7 +243,3 @@ func (cm *containerManagerImpl) GetCPUs(_, _ string) cpuset.CPUSet {
 func (cm *containerManagerImpl) GetAllocatableCPUs() cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
-
-func (cm *containerManagerImpl) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	return nil
-}
@@ -37,7 +37,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
@@ -152,7 +151,7 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi
 
 		socketname:            file,
 		socketdir:             dir,
-		allDevices:            make(map[string]map[string]pluginapi.Device),
+		allDevices:            NewResourceDeviceInstances(),
 		healthyDevices:        make(map[string]sets.String),
 		unhealthyDevices:      make(map[string]sets.String),
 		allocatedDevices:      make(map[string]sets.String),
@@ -1078,7 +1077,7 @@ func (m *ManagerImpl) GetAllocatableDevices() ResourceDeviceInstances {
 }
 
 // GetDevices returns the devices used by the specified container
-func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
+func (m *ManagerImpl) GetDevices(podUID, containerName string) ResourceDeviceInstances {
 	return m.podDevices.getContainerDevices(podUID, containerName)
 }
 
@@ -18,7 +18,6 @@ package devicemanager
 
 import (
 	v1 "k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -80,7 +79,12 @@ func (h *ManagerStub) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
 }
 
 // GetDevices returns nil
-func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (h *ManagerStub) GetDevices(_, _ string) ResourceDeviceInstances {
+	return nil
+}
+
+// GetAllocatableDevices returns nothing
+func (h *ManagerStub) GetAllocatableDevices() ResourceDeviceInstances {
 	return nil
 }
 
@@ -93,8 +97,3 @@ func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool {
 func (h *ManagerStub) UpdateAllocatedDevices() {
 	return
 }
-
-// GetAllocatableDevices returns nothing
-func (h *ManagerStub) GetAllocatableDevices() ResourceDeviceInstances {
-	return nil
-}
@@ -622,7 +622,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 		activePods:        activePods,
 		sourcesReady:      &sourcesReadyStub{},
 		checkpointManager: ckm,
-		allDevices:        make(map[string]map[string]pluginapi.Device),
+		allDevices:        NewResourceDeviceInstances(),
 	}
 
 	for _, res := range testRes {
@@ -23,7 +23,6 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/sets"
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
@@ -324,7 +323,7 @@ func (pdev *podDevices) deviceRunContainerOptions(podUID, contName string) *Devi
 }
 
 // getContainerDevices returns the devices assigned to the provided container for all ResourceNames
-func (pdev *podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices {
+func (pdev *podDevices) getContainerDevices(podUID, contName string) ResourceDeviceInstances {
 	pdev.RLock()
 	defer pdev.RUnlock()
 
@@ -334,21 +333,39 @@ func (pdev *podDevices) getContainerDevices(podUID, contName string) []*podresou
 	if _, contExists := pdev.devs[podUID][contName]; !contExists {
 		return nil
 	}
-	cDev := []*podresourcesapi.ContainerDevices{}
+	resDev := NewResourceDeviceInstances()
 	for resource, allocateInfo := range pdev.devs[podUID][contName] {
 		if len(allocateInfo.deviceIds) == 0 {
 			continue
 		}
+		devicePluginMap := make(map[string]pluginapi.Device)
 		for numaid, devlist := range allocateInfo.deviceIds {
-			cDev = append(cDev, &podresourcesapi.ContainerDevices{
-				ResourceName: resource,
-				DeviceIds:    devlist,
-				Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaid}}},
-			})
+			for _, devId := range devlist {
+				NUMANodes := []*pluginapi.NUMANode{{ID: numaid}}
+				if pDev, ok := devicePluginMap[devId]; ok && pDev.Topology != nil {
+					if nodes := pDev.Topology.GetNodes(); nodes != nil {
+						NUMANodes = append(NUMANodes, nodes...)
+					}
+				}
+
+				devicePluginMap[devId] = pluginapi.Device{
+					// ID and Healthy are not relevant here.
+					Topology: &pluginapi.TopologyInfo{
+						Nodes: NUMANodes,
+					},
+				}
+			}
 		}
+		resDev[resource] = devicePluginMap
 	}
-	return cDev
+	return resDev
 }
 
-// ResourceDeviceInstances is a mapping resource name -> device name -> device data
-type ResourceDeviceInstances map[string]map[string]pluginapi.Device
+// DeviceInstances is a mapping device name -> plugin device data
+type DeviceInstances map[string]pluginapi.Device
+
+// ResourceDeviceInstances is a mapping resource name -> DeviceInstances
+type ResourceDeviceInstances map[string]DeviceInstances
+
+func NewResourceDeviceInstances() ResourceDeviceInstances {
+	return make(ResourceDeviceInstances)
@@ -35,13 +35,18 @@ func TestGetContainerDevices(t *testing.T) {
 		devices,
 		constructAllocResp(map[string]string{"/dev/r1dev1": "/dev/r1dev1", "/dev/r1dev2": "/dev/r1dev2"}, map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))
 
-	contDevices := podDevices.getContainerDevices(podID, contID)
-	require.Equal(t, len(devices), len(contDevices), "Incorrect container devices")
-	for _, contDev := range contDevices {
-		for _, node := range contDev.Topology.Nodes {
+	resContDevices := podDevices.getContainerDevices(podID, contID)
+	contDevices, ok := resContDevices[resourceName1]
+	require.True(t, ok, "resource %q not present", resourceName1)
+
+	for devId, plugInfo := range contDevices {
+		nodes := plugInfo.GetTopology().GetNodes()
+		require.Equal(t, len(nodes), len(devices), "Incorrect container devices: %v - %v (nodes %v)", devices, contDevices, nodes)
+
+		for _, node := range plugInfo.GetTopology().GetNodes() {
 			dev, ok := devices[node.ID]
 			require.True(t, ok, "NUMA id %v doesn't exist in result", node.ID)
-			require.Equal(t, contDev.DeviceIds[0], dev[0], "Can't find device %s in result", dev[0])
+			require.Equal(t, devId, dev[0], "Can't find device %s in result", dev[0])
 		}
 	}
 }
@@ -56,7 +56,7 @@ func TestGetTopologyHints(t *testing.T) {
 
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			podDevices:       newPodDevices(),
@@ -66,7 +66,7 @@ func TestGetTopologyHints(t *testing.T) {
 		}
 
 		for r := range tc.devices {
-			m.allDevices[r] = make(map[string]pluginapi.Device)
+			m.allDevices[r] = make(DeviceInstances)
 			m.healthyDevices[r] = sets.NewString()
 
 			for _, d := range tc.devices[r] {
@@ -409,7 +409,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 	}
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:            make(map[string]map[string]pluginapi.Device),
+			allDevices:            NewResourceDeviceInstances(),
 			healthyDevices:        make(map[string]sets.String),
 			allocatedDevices:      make(map[string]sets.String),
 			endpoints:             make(map[string]endpointInfo),
@@ -419,7 +419,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 			topologyAffinityStore: &mockAffinityStore{tc.hint},
 		}
 
-		m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
+		m.allDevices[tc.resource] = make(DeviceInstances)
 		m.healthyDevices[tc.resource] = sets.NewString()
 		m.endpoints[tc.resource] = endpointInfo{}
 
@@ -598,7 +598,7 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 	}
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:            make(map[string]map[string]pluginapi.Device),
+			allDevices:            NewResourceDeviceInstances(),
 			healthyDevices:        make(map[string]sets.String),
 			allocatedDevices:      make(map[string]sets.String),
 			endpoints:             make(map[string]endpointInfo),
@@ -608,7 +608,7 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 			topologyAffinityStore: &mockAffinityStore{tc.hint},
 		}
 
-		m.allDevices[tc.resource] = make(map[string]pluginapi.Device)
+		m.allDevices[tc.resource] = make(DeviceInstances)
 		m.healthyDevices[tc.resource] = sets.NewString()
 		for _, d := range tc.allDevices {
 			m.allDevices[tc.resource][d.ID] = d
@@ -920,7 +920,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 
 	for _, tc := range tcases {
 		m := ManagerImpl{
-			allDevices:       make(map[string]map[string]pluginapi.Device),
+			allDevices:       NewResourceDeviceInstances(),
 			healthyDevices:   make(map[string]sets.String),
 			allocatedDevices: make(map[string]sets.String),
 			podDevices:       newPodDevices(),
@@ -930,7 +930,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 		}
 
 		for r := range tc.devices {
-			m.allDevices[r] = make(map[string]pluginapi.Device)
+			m.allDevices[r] = make(DeviceInstances)
 			m.healthyDevices[r] = sets.NewString()
 
 			for _, d := range tc.devices[r] {
@@ -20,7 +20,6 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -60,7 +59,10 @@ type Manager interface {
 	GetWatcherHandler() cache.PluginHandler
 
 	// GetDevices returns information about the devices assigned to pods and containers
-	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
+	GetDevices(podUID, containerName string) ResourceDeviceInstances
+
+	// GetAllocatableDevices returns information about all the devices known to the manager
+	GetAllocatableDevices() ResourceDeviceInstances
 
 	// ShouldResetExtendedResourceCapacity returns whether the extended resources should be reset or not,
 	// depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates
@@ -77,9 +79,6 @@ type Manager interface {
 
 	// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 	UpdateAllocatedDevices()
-
-	// GetAllocatableDevices returns information about all the devices known to the manager
-	GetAllocatableDevices() ResourceDeviceInstances
 }
 
 // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.
@@ -23,7 +23,6 @@ import (
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@@ -169,13 +168,20 @@ func (cm *FakeContainerManager) GetPodCgroupRoot() string {
 	return ""
 }
 
-func (cm *FakeContainerManager) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
+func (cm *FakeContainerManager) GetDevices(_, _ string) devicemanager.ResourceDeviceInstances {
 	cm.Lock()
 	defer cm.Unlock()
 	cm.CalledFunctions = append(cm.CalledFunctions, "GetDevices")
 	return nil
 }
 
+func (cm *FakeContainerManager) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
+	cm.Lock()
+	defer cm.Unlock()
+	cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
+	return nil
+}
+
 func (cm *FakeContainerManager) ShouldResetExtendedResourceCapacity() bool {
 	cm.Lock()
 	defer cm.Unlock()
@@ -204,13 +210,6 @@ func (cm *FakeContainerManager) GetCPUs(_, _ string) cpuset.CPUSet {
 	return cpuset.CPUSet{}
 }
 
-func (cm *FakeContainerManager) GetAllocatableDevices() devicemanager.ResourceDeviceInstances {
-	cm.Lock()
-	defer cm.Unlock()
-	cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
-	return nil
-}
-
 func (cm *FakeContainerManager) GetAllocatableCPUs() cpuset.CPUSet {
 	cm.Lock()
 	defer cm.Unlock()