Merge pull request #95734 from fromanirh/podresources-concrete-resources-apis
podresources APIs: concrete resources apis: implement GetAllocatableResources
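For context, a minimal and hypothetical client sketch of the endpoint this PR adds: it queries GetAllocatableResources over the kubelet pod-resources gRPC socket. The socket path and dial options are assumptions, not part of this change.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumed default location of the kubelet pod-resources socket.
	conn, err := grpc.DialContext(ctx, "unix:///var/lib/kubelet/pod-resources/kubelet.sock",
		grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := podresourcesapi.NewPodResourcesListerClient(conn)
	resp, err := client.GetAllocatableResources(ctx, &podresourcesapi.AllocatableResourcesRequest{})
	if err != nil {
		// Fails, among other reasons, when the KubeletPodResourcesGetAllocatable feature gate is off.
		panic(err)
	}
	fmt.Println("allocatable CPU ids:", resp.CpuIds)
	for _, dev := range resp.Devices {
		fmt.Println("allocatable device:", dev.ResourceName, dev.DeviceIds)
	}
}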
@@ -18,7 +18,10 @@ package podresources

import (
	"context"
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	kubefeatures "k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/metrics"

	"k8s.io/kubelet/pkg/apis/podresources/v1"
@@ -44,6 +47,7 @@ func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesP

// List returns information about the resources assigned to pods on the node
func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResourcesRequest) (*v1.ListPodResourcesResponse, error) {
	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
	metrics.PodResourcesEndpointRequestsListCount.WithLabelValues("v1").Inc()

	pods := p.podsProvider.GetPods()
	podResources := make([]*v1.PodResources, len(pods))
@@ -70,3 +74,21 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource
		PodResources: podResources,
	}, nil
}

// GetAllocatableResources returns information about all the resources known to the server - this is closer to capacity than to the current amount of free resources.
func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *v1.AllocatableResourcesRequest) (*v1.AllocatableResourcesResponse, error) {
	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
	metrics.PodResourcesEndpointRequestsGetAllocatableCount.WithLabelValues("v1").Inc()

	if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesGetAllocatable) {
		metrics.PodResourcesEndpointErrorsGetAllocatableCount.WithLabelValues("v1").Inc()
		return nil, fmt.Errorf("Pod Resources API GetAllocatableResources disabled")
	}

	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()

	return &v1.AllocatableResourcesResponse{
		Devices: p.devicesProvider.GetAllocatableDevices(),
		CpuIds:  p.cpusProvider.GetAllocatableCPUs(),
	}, nil
}

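A rough sketch of how a server built with NewV1PodResourcesServer could be exposed over gRPC; the socket path and provider arguments are assumptions, and the kubelet's real wiring lives outside this hunk.

// Hypothetical wiring sketch (assumes "net" and "google.golang.org/grpc" are imported,
// alongside the imports shown above). Providers are assumed implementations of the
// interfaces in types.go.
func serveV1PodResources(socket string, pods PodsProvider, devices DevicesProvider, cpus CPUsProvider) error {
	lis, err := net.Listen("unix", socket) // e.g. the pod-resources socket path
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	v1.RegisterPodResourcesListerServer(s, NewV1PodResourcesServer(pods, devices, cpus))
	return s.Serve(lis)
}
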
@@ -18,12 +18,19 @@ package podresources

import (
	"context"
	"reflect"
	"sort"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
	pkgfeatures "k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
)

func TestListPodResourcesV1(t *testing.T) {
@@ -135,14 +142,335 @@ func TestListPodResourcesV1(t *testing.T) {
			m.On("GetDevices", string(podUID), containerName).Return(tc.devices)
			m.On("GetCPUs", string(podUID), containerName).Return(tc.cpus)
			m.On("UpdateAllocatedDevices").Return()
			m.On("GetAllocatableCPUs").Return(cpuset.CPUSet{})
			m.On("GetAllocatableDevices").Return(devicemanager.NewResourceDeviceInstances())
			server := NewV1PodResourcesServer(m, m, m)
			resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{})
			if err != nil {
				t.Errorf("want err = %v, got %q", nil, err)
			}
			if !equalListResponse(tc.expectedResponse, resp) {
				t.Errorf("want resp = %s, got %s", tc.expectedResponse.String(), resp.String())
			}
		})
	}
}

func TestAllocatableResources(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesGetAllocatable, true)()

	allDevs := []*podresourcesapi.ContainerDevices{
		{
			ResourceName: "resource",
			DeviceIds:    []string{"dev0"},
			Topology: &podresourcesapi.TopologyInfo{
				Nodes: []*podresourcesapi.NUMANode{
					{
						ID: 0,
					},
				},
			},
		},
		{
			ResourceName: "resource",
			DeviceIds:    []string{"dev1"},
			Topology: &podresourcesapi.TopologyInfo{
				Nodes: []*podresourcesapi.NUMANode{
					{
						ID: 1,
					},
				},
			},
		},
		{
			ResourceName: "resource-nt",
			DeviceIds:    []string{"devA"},
		},
		{
			ResourceName: "resource-mm",
			DeviceIds:    []string{"devM0"},
			Topology: &podresourcesapi.TopologyInfo{
				Nodes: []*podresourcesapi.NUMANode{
					{
						ID: 0,
					},
				},
			},
		},
		{
			ResourceName: "resource-mm",
			DeviceIds:    []string{"devMM"},
			Topology: &podresourcesapi.TopologyInfo{
				Nodes: []*podresourcesapi.NUMANode{
					{
						ID: 0,
					},
					{
						ID: 1,
					},
				},
			},
		},
	}

	allCPUs := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

	for _, tc := range []struct {
		desc                                 string
		allCPUs                              []int64
		allDevices                           []*podresourcesapi.ContainerDevices
		expectedAllocatableResourcesResponse *podresourcesapi.AllocatableResourcesResponse
	}{
		{
			desc:                                 "no devices, no CPUs",
			allCPUs:                              []int64{},
			allDevices:                           []*podresourcesapi.ContainerDevices{},
			expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{},
		},
		{
			desc:       "no devices, all CPUs",
			allCPUs:    allCPUs,
			allDevices: []*podresourcesapi.ContainerDevices{},
			expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
				CpuIds: allCPUs,
			},
		},
		{
			desc:       "with devices, all CPUs",
			allCPUs:    allCPUs,
			allDevices: allDevs,
			expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
				CpuIds: allCPUs,
				Devices: []*podresourcesapi.ContainerDevices{
					{
						ResourceName: "resource",
						DeviceIds:    []string{"dev0"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
							},
						},
					},
					{
						ResourceName: "resource",
						DeviceIds:    []string{"dev1"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 1,
								},
							},
						},
					},
					{
						ResourceName: "resource-nt",
						DeviceIds:    []string{"devA"},
					},
					{
						ResourceName: "resource-mm",
						DeviceIds:    []string{"devM0"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
							},
						},
					},
					{
						ResourceName: "resource-mm",
						DeviceIds:    []string{"devMM"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
								{
									ID: 1,
								},
							},
						},
					},
				},
			},
		},
		{
			desc:       "with devices, no CPUs",
			allCPUs:    []int64{},
			allDevices: allDevs,
			expectedAllocatableResourcesResponse: &podresourcesapi.AllocatableResourcesResponse{
				Devices: []*podresourcesapi.ContainerDevices{
					{
						ResourceName: "resource",
						DeviceIds:    []string{"dev0"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
							},
						},
					},
					{
						ResourceName: "resource",
						DeviceIds:    []string{"dev1"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 1,
								},
							},
						},
					},
					{
						ResourceName: "resource-nt",
						DeviceIds:    []string{"devA"},
					},
					{
						ResourceName: "resource-mm",
						DeviceIds:    []string{"devM0"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
							},
						},
					},
					{
						ResourceName: "resource-mm",
						DeviceIds:    []string{"devMM"},
						Topology: &podresourcesapi.TopologyInfo{
							Nodes: []*podresourcesapi.NUMANode{
								{
									ID: 0,
								},
								{
									ID: 1,
								},
							},
						},
					},
				},
			},
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			m := new(mockProvider)
			m.On("GetDevices", "", "").Return([]*podresourcesapi.ContainerDevices{})
			m.On("GetCPUs", "", "").Return([]int64{})
			m.On("UpdateAllocatedDevices").Return()
			m.On("GetAllocatableDevices").Return(tc.allDevices)
			m.On("GetAllocatableCPUs").Return(tc.allCPUs)
			server := NewV1PodResourcesServer(m, m, m)

			resp, err := server.GetAllocatableResources(context.TODO(), &podresourcesapi.AllocatableResourcesRequest{})
			if err != nil {
				t.Errorf("want err = %v, got %q", nil, err)
			}

			if !equalAllocatableResourcesResponse(tc.expectedAllocatableResourcesResponse, resp) {
				t.Errorf("want resp = %s, got %s", tc.expectedAllocatableResourcesResponse.String(), resp.String())
			}
		})
	}
}

func equalListResponse(respA, respB *podresourcesapi.ListPodResourcesResponse) bool {
	if len(respA.PodResources) != len(respB.PodResources) {
		return false
	}
	for idx := 0; idx < len(respA.PodResources); idx++ {
		podResA := respA.PodResources[idx]
		podResB := respB.PodResources[idx]
		if podResA.Name != podResB.Name {
			return false
		}
		if podResA.Namespace != podResB.Namespace {
			return false
		}
		if len(podResA.Containers) != len(podResB.Containers) {
			return false
		}
		for jdx := 0; jdx < len(podResA.Containers); jdx++ {
			cntA := podResA.Containers[jdx]
			cntB := podResB.Containers[jdx]

			if cntA.Name != cntB.Name {
				return false
			}
			if !equalInt64s(cntA.CpuIds, cntB.CpuIds) {
				return false
			}

			if !equalContainerDevices(cntA.Devices, cntB.Devices) {
				return false
			}
		}
	}
	return true
}

func equalContainerDevices(devA, devB []*podresourcesapi.ContainerDevices) bool {
	if len(devA) != len(devB) {
		return false
	}

	for idx := 0; idx < len(devA); idx++ {
		cntDevA := devA[idx]
		cntDevB := devB[idx]

		if cntDevA.ResourceName != cntDevB.ResourceName {
			return false
		}
		if !equalTopology(cntDevA.Topology, cntDevB.Topology) {
			return false
		}
		if !equalStrings(cntDevA.DeviceIds, cntDevB.DeviceIds) {
			return false
		}
	}

	return true
}

func equalInt64s(a, b []int64) bool {
	if len(a) != len(b) {
		return false
	}
	aCopy := append([]int64{}, a...)
	sort.Slice(aCopy, func(i, j int) bool { return aCopy[i] < aCopy[j] })
	bCopy := append([]int64{}, b...)
	sort.Slice(bCopy, func(i, j int) bool { return bCopy[i] < bCopy[j] })
	return reflect.DeepEqual(aCopy, bCopy)
}

func equalStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	aCopy := append([]string{}, a...)
	sort.Strings(aCopy)
	bCopy := append([]string{}, b...)
	sort.Strings(bCopy)
	return reflect.DeepEqual(aCopy, bCopy)
}

func equalTopology(a, b *podresourcesapi.TopologyInfo) bool {
	if a == nil && b != nil {
		return false
	}
	if a != nil && b == nil {
		return false
	}
	return reflect.DeepEqual(a, b)
}

func equalAllocatableResourcesResponse(respA, respB *podresourcesapi.AllocatableResourcesResponse) bool {
	if !equalInt64s(respA.CpuIds, respB.CpuIds) {
		return false
	}
	return equalContainerDevices(respA.Devices, respB.Devices)
}

@@ -52,6 +52,16 @@ func (m *mockProvider) UpdateAllocatedDevices() {
	m.Called()
}

func (m *mockProvider) GetAllocatableDevices() []*podresourcesv1.ContainerDevices {
	args := m.Called()
	return args.Get(0).([]*podresourcesv1.ContainerDevices)
}

func (m *mockProvider) GetAllocatableCPUs() []int64 {
	args := m.Called()
	return args.Get(0).([]int64)
}

func TestListPodResourcesV1alpha1(t *testing.T) {
	podName := "pod-name"
	podNamespace := "pod-namespace"

@@ -23,8 +23,12 @@ import (

// DevicesProvider knows how to provide the devices used by the given container
type DevicesProvider interface {
	// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
	UpdateAllocatedDevices()
	// GetDevices returns information about the devices assigned to pods and containers
	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
	// GetAllocatableDevices returns information about all the devices known to the manager
	GetAllocatableDevices() []*podresourcesapi.ContainerDevices
}

// PodsProvider knows how to provide the pods admitted by the node
@@ -34,5 +38,8 @@ type PodsProvider interface {

// CPUsProvider knows how to provide the cpus used by the given container
type CPUsProvider interface {
	// GetCPUs returns information about the cpus assigned to pods and containers
	GetCPUs(podUID, containerName string) []int64
	// GetAllocatableCPUs returns the allocatable (not allocated) CPUs
	GetAllocatableCPUs() []int64
}
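To illustrate the shape of the extended CPUsProvider contract, a minimal, hypothetical implementation backed by static data; the fixedCPUsProvider type and its key layout are invented for the example, and the real provider in the kubelet is the container manager.

// Hypothetical static implementation of CPUsProvider, for illustration only.
type fixedCPUsProvider struct {
	assigned    map[string][]int64 // keyed by podUID+"/"+containerName (assumed layout)
	allocatable []int64
}

func (f *fixedCPUsProvider) GetCPUs(podUID, containerName string) []int64 {
	return f.assigned[podUID+"/"+containerName]
}

func (f *fixedCPUsProvider) GetAllocatableCPUs() []int64 {
	return f.allocatable
}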