Make PodResources API include restartable init containers

Gunju Kim 2023-09-17 20:17:58 +09:00
parent 11785bb815
commit dd890b899f
No known key found for this signature in database
GPG Key ID: 9300A528F3F0DAB7
4 changed files with 932 additions and 53 deletions
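With the SidecarContainers feature gate enabled, the kubelet's PodResources List() and Get() endpoints now report restartable (sidecar) init containers alongside regular containers; init containers without RestartPolicy: Always remain unreported. A minimal consumer sketch of where the new entries surface (not part of this commit; the socket path and timeout values are assumptions):

```go
// Sketch: query the kubelet PodResources API over its local gRPC socket.
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
	// Assumed default socket path; the kubelet makes this configurable.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/pod-resources/kubelet.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client := podresourcesv1.NewPodResourcesListerClient(conn)
	resp, err := client.List(ctx, &podresourcesv1.ListPodResourcesRequest{})
	if err != nil {
		panic(err)
	}
	// With SidecarContainers enabled, restartable init containers appear
	// here as ordinary ContainerResources entries, before the app containers.
	for _, pr := range resp.GetPodResources() {
		for _, cnt := range pr.GetContainers() {
			fmt.Printf("%s/%s: %s cpus=%v\n", pr.GetNamespace(), pr.GetName(), cnt.GetName(), cnt.GetCpuIds())
		}
	}
}
```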


@@ -20,11 +20,13 @@ import (
"context"
"fmt"

+v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/metrics"
+"k8s.io/kubernetes/pkg/kubelet/types"

-"k8s.io/kubelet/pkg/apis/podresources/v1"
+podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)

// v1PodResourcesServer implements PodResourcesListerServer
@@ -38,7 +40,7 @@ type v1PodResourcesServer struct {
// NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
-func NewV1PodResourcesServer(providers PodResourcesProviders) v1.PodResourcesListerServer {
+func NewV1PodResourcesServer(providers PodResourcesProviders) podresourcesv1.PodResourcesListerServer {
return &v1PodResourcesServer{
podsProvider: providers.Pods,
devicesProvider: providers.Devices,
@@ -49,48 +51,51 @@ func NewV1PodResourcesServer(providers PodResourcesProviders) v1.PodResourcesLis
}

// List returns information about the resources assigned to pods on the node
-func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResourcesRequest) (*v1.ListPodResourcesResponse, error) {
+func (p *v1PodResourcesServer) List(ctx context.Context, req *podresourcesv1.ListPodResourcesRequest) (*podresourcesv1.ListPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsListCount.WithLabelValues("v1").Inc()

pods := p.podsProvider.GetPods()
-podResources := make([]*v1.PodResources, len(pods))
+podResources := make([]*podresourcesv1.PodResources, len(pods))
p.devicesProvider.UpdateAllocatedDevices()

for i, pod := range pods {
-pRes := v1.PodResources{
+pRes := podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
-Containers: make([]*v1.ContainerResources, len(pod.Spec.Containers)),
+Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
}

-for j, container := range pod.Spec.Containers {
-pRes.Containers[j] = &v1.ContainerResources{
-Name: container.Name,
-Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
-CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
-Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
-}
-if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) {
-pRes.Containers[j].DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, &container)
-}
+if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
+pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
+for _, container := range pod.Spec.InitContainers {
+if !types.IsRestartableInitContainer(&container) {
+continue
+}
+pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
+}
+}
+for _, container := range pod.Spec.Containers {
+pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
}
podResources[i] = &pRes
}

-response := &v1.ListPodResourcesResponse{
+response := &podresourcesv1.ListPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}

// GetAllocatableResources returns information about all the resources known by the server - this more like the capacity, not like the current amount of free resources.
-func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *v1.AllocatableResourcesRequest) (*v1.AllocatableResourcesResponse, error) {
+func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *podresourcesv1.AllocatableResourcesRequest) (*podresourcesv1.AllocatableResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetAllocatableCount.WithLabelValues("v1").Inc()

-response := &v1.AllocatableResourcesResponse{
+response := &podresourcesv1.AllocatableResourcesResponse{
Devices: p.devicesProvider.GetAllocatableDevices(),
CpuIds: p.cpusProvider.GetAllocatableCPUs(),
Memory: p.memoryProvider.GetAllocatableMemory(),
@@ -100,7 +105,7 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req
}

// Get returns information about the resources assigned to a specific pod
-func (p *v1PodResourcesServer) Get(ctx context.Context, req *v1.GetPodResourcesRequest) (*v1.GetPodResourcesResponse, error) {
+func (p *v1PodResourcesServer) Get(ctx context.Context, req *podresourcesv1.GetPodResourcesRequest) (*podresourcesv1.GetPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetCount.WithLabelValues("v1").Inc()

@@ -115,26 +120,44 @@ func (p *v1PodResourcesServer) Get(ctx context.Context, req *v1.GetPodResourcesR
return nil, fmt.Errorf("pod %s in namespace %s not found", req.PodName, req.PodNamespace)
}

-podResources := &v1.PodResources{
+podResources := &podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
-Containers: make([]*v1.ContainerResources, len(pod.Spec.Containers)),
+Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
}

-for i, container := range pod.Spec.Containers {
-podResources.Containers[i] = &v1.ContainerResources{
-Name: container.Name,
-Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
-CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
-Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
-}
-if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) {
-podResources.Containers[i].DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, &container)
-}
+if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
+podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
+for _, container := range pod.Spec.InitContainers {
+if !types.IsRestartableInitContainer(&container) {
+continue
+}
+podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
+}
+}
+for _, container := range pod.Spec.Containers {
+podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
}

-response := &v1.GetPodResourcesResponse{
+response := &podresourcesv1.GetPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}
+
+func (p *v1PodResourcesServer) getContainerResources(pod *v1.Pod, container *v1.Container) *podresourcesv1.ContainerResources {
+containerResources := &podresourcesv1.ContainerResources{
+Name: container.Name,
+Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
+CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
+Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
+}
+if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) {
+containerResources.DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, container)
+}
+return containerResources
+}
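Both List and Get now funnel per-container assembly through getContainerResources and filter init containers with types.IsRestartableInitContainer. For reference, a minimal restatement of that predicate (the real helper lives in the kubelet's types package; this sketch assumes only what the diff shows):

```go
import v1 "k8s.io/api/core/v1"

// An init container counts as restartable (a sidecar) only when its
// container-level RestartPolicy is set and equals Always; plain init
// containers leave it nil, so the loops above skip them.
func IsRestartableInitContainer(initContainer *v1.Container) bool {
	if initContainer.RestartPolicy == nil {
		return false
	}
	return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}
```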


@@ -221,7 +221,7 @@ func TestListPodResourcesV1(t *testing.T) {
mockMemoryProvider := podresourcetest.NewMockMemoryProvider(mockCtrl)
mockDynamicResourcesProvider := podresourcetest.NewMockDynamicResourcesProvider(mockCtrl)

-mockPodsProvider.EXPECT().GetPods().Return(tc.pods).AnyTimes().AnyTimes()
+mockPodsProvider.EXPECT().GetPods().Return(tc.pods).AnyTimes()
mockDevicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(tc.devices).AnyTimes()
mockCPUsProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(tc.cpus).AnyTimes()
mockMemoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(tc.memory).AnyTimes()
@@ -250,6 +250,311 @@ func TestListPodResourcesV1(t *testing.T) {
}
}

func TestListPodResourcesWithInitContainersV1(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesDynamicResources, true)()
podName := "pod-name"
podNamespace := "pod-namespace"
podUID := types.UID("pod-uid")
initContainerName := "init-container-name"
containerName := "container-name"
numaID := int64(1)
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
devs := []*podresourcesapi.ContainerDevices{
{
ResourceName: "resource",
DeviceIds: []string{"dev0", "dev1"},
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
cpus := []int64{12, 23, 30}
memory := []*podresourcesapi.ContainerMemory{
{
MemoryType: "memory",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
{
MemoryType: "hugepages-1Gi",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
containers := []v1.Container{
{
Name: containerName,
},
}
for _, tc := range []struct {
desc string
pods []*v1.Pod
mockFunc func(
[]*v1.Pod,
*podresourcetest.MockDevicesProvider,
*podresourcetest.MockCPUsProvider,
*podresourcetest.MockMemoryProvider,
*podresourcetest.MockDynamicResourcesProvider)
sidecarContainersEnabled bool
expectedResponse *podresourcesapi.ListPodResourcesResponse
}{
{
desc: "pod having an init container",
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
},
},
Containers: containers,
},
},
},
mockFunc: func(
pods []*v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
},
{
desc: "pod having an init container with SidecarContainers enabled",
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
},
},
Containers: containers,
},
},
},
mockFunc: func(
pods []*v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
sidecarContainersEnabled: true,
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
},
{
desc: "pod having a restartable init container with SidecarContainers disabled",
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
RestartPolicy: &containerRestartPolicyAlways,
},
},
Containers: containers,
},
},
},
mockFunc: func(
pods []*v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
},
{
desc: "pod having an init container with SidecarContainers enabled",
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
RestartPolicy: &containerRestartPolicyAlways,
},
},
Containers: containers,
},
},
},
mockFunc: func(
pods []*v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), initContainerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), initContainerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), initContainerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.InitContainers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
sidecarContainersEnabled: true,
expectedResponse: &podresourcesapi.ListPodResourcesResponse{
PodResources: []*podresourcesapi.PodResources{
{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: initContainerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
},
} {
t.Run(tc.desc, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.SidecarContainers, tc.sidecarContainersEnabled)()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockDevicesProvider := podresourcetest.NewMockDevicesProvider(mockCtrl)
mockPodsProvider := podresourcetest.NewMockPodsProvider(mockCtrl)
mockCPUsProvider := podresourcetest.NewMockCPUsProvider(mockCtrl)
mockMemoryProvider := podresourcetest.NewMockMemoryProvider(mockCtrl)
mockDynamicResourcesProvider := podresourcetest.NewMockDynamicResourcesProvider(mockCtrl)
mockPodsProvider.EXPECT().GetPods().Return(tc.pods).AnyTimes()
tc.mockFunc(tc.pods, mockDevicesProvider, mockCPUsProvider, mockMemoryProvider, mockDynamicResourcesProvider)
providers := PodResourcesProviders{
Pods: mockPodsProvider,
Devices: mockDevicesProvider,
Cpus: mockCPUsProvider,
Memory: mockMemoryProvider,
DynamicResources: mockDynamicResourcesProvider,
}
server := NewV1PodResourcesServer(providers)
resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{})
if err != nil {
t.Errorf("want err = %v, got %q", nil, err)
}
if !equalListResponse(tc.expectedResponse, resp) {
t.Errorf("want resp = %s, got %s", tc.expectedResponse.String(), resp.String())
}
})
}
}
func TestAllocatableResources(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -724,6 +1029,297 @@ func TestGetPodResourcesV1(t *testing.T) {
}
}

func TestGetPodResourcesWithInitContainersV1(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesGet, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesDynamicResources, true)()
podName := "pod-name"
podNamespace := "pod-namespace"
podUID := types.UID("pod-uid")
initContainerName := "init-container-name"
containerName := "container-name"
numaID := int64(1)
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
devs := []*podresourcesapi.ContainerDevices{
{
ResourceName: "resource",
DeviceIds: []string{"dev0", "dev1"},
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
cpus := []int64{12, 23, 30}
memory := []*podresourcesapi.ContainerMemory{
{
MemoryType: "memory",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
{
MemoryType: "hugepages-1Gi",
Size_: 1073741824,
Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
},
}
containers := []v1.Container{
{
Name: containerName,
},
}
for _, tc := range []struct {
desc string
pod *v1.Pod
mockFunc func(
*v1.Pod,
*podresourcetest.MockDevicesProvider,
*podresourcetest.MockCPUsProvider,
*podresourcetest.MockMemoryProvider,
*podresourcetest.MockDynamicResourcesProvider)
sidecarContainersEnabled bool
expectedResponse *podresourcesapi.GetPodResourcesResponse
}{
{
desc: "pod having an init container",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
},
},
Containers: containers,
},
},
mockFunc: func(
pod *v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
expectedResponse: &podresourcesapi.GetPodResourcesResponse{
PodResources: &podresourcesapi.PodResources{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
{
desc: "pod having an init container with SidecarContainers enabled",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
},
},
Containers: containers,
},
},
mockFunc: func(
pod *v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
sidecarContainersEnabled: true,
expectedResponse: &podresourcesapi.GetPodResourcesResponse{
PodResources: &podresourcesapi.PodResources{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
{
desc: "pod having a restartable init container with SidecarContainers disabled",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
RestartPolicy: &containerRestartPolicyAlways,
},
},
Containers: containers,
},
},
mockFunc: func(
pod *v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
expectedResponse: &podresourcesapi.GetPodResourcesResponse{
PodResources: &podresourcesapi.PodResources{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
{
desc: "pod having an init container with SidecarContainers enabled",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: podNamespace,
UID: podUID,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: initContainerName,
RestartPolicy: &containerRestartPolicyAlways,
},
},
Containers: containers,
},
},
mockFunc: func(
pod *v1.Pod,
devicesProvider *podresourcetest.MockDevicesProvider,
cpusProvider *podresourcetest.MockCPUsProvider,
memoryProvider *podresourcetest.MockMemoryProvider,
dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
devicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), initContainerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), initContainerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), initContainerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.InitContainers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).AnyTimes()
cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).AnyTimes()
memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).AnyTimes()
dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).AnyTimes()
},
sidecarContainersEnabled: true,
expectedResponse: &podresourcesapi.GetPodResourcesResponse{
PodResources: &podresourcesapi.PodResources{
Name: podName,
Namespace: podNamespace,
Containers: []*podresourcesapi.ContainerResources{
{
Name: initContainerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
{
Name: containerName,
Devices: devs,
CpuIds: cpus,
Memory: memory,
DynamicResources: []*podresourcesapi.DynamicResource{},
},
},
},
},
},
} {
t.Run(tc.desc, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.SidecarContainers, tc.sidecarContainersEnabled)()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockDevicesProvider := podresourcetest.NewMockDevicesProvider(mockCtrl)
mockPodsProvider := podresourcetest.NewMockPodsProvider(mockCtrl)
mockCPUsProvider := podresourcetest.NewMockCPUsProvider(mockCtrl)
mockMemoryProvider := podresourcetest.NewMockMemoryProvider(mockCtrl)
mockDynamicResourcesProvider := podresourcetest.NewMockDynamicResourcesProvider(mockCtrl)
mockPodsProvider.EXPECT().GetPodByName(podNamespace, podName).Return(tc.pod, true).AnyTimes()
tc.mockFunc(tc.pod, mockDevicesProvider, mockCPUsProvider, mockMemoryProvider, mockDynamicResourcesProvider)
providers := PodResourcesProviders{
Pods: mockPodsProvider,
Devices: mockDevicesProvider,
Cpus: mockCPUsProvider,
Memory: mockMemoryProvider,
DynamicResources: mockDynamicResourcesProvider,
}
server := NewV1PodResourcesServer(providers)
podReq := &podresourcesapi.GetPodResourcesRequest{PodName: podName, PodNamespace: podNamespace}
resp, err := server.Get(context.TODO(), podReq)
if err != nil {
t.Errorf("want err = %v, got %q", nil, err)
}
if !equalGetResponse(tc.expectedResponse, resp) {
t.Errorf("want resp = %s, got %s", tc.expectedResponse.String(), resp.String())
}
})
}
}
func equalListResponse(respA, respB *podresourcesapi.ListPodResourcesResponse) bool {
if len(respA.PodResources) != len(respB.PodResources) {
return false


@@ -687,21 +687,21 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Expect(resourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
gomega.Expect(resourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))

-// Note that the kubelet does not report resources for restartable
-// init containers for now.
-// See https://github.com/kubernetes/kubernetes/issues/120501.
-// TODO: Fix this test to check the resources allocated to the
-// restartable init container once the kubelet reports resources
-// for restartable init containers.
-gomega.Expect(resourcesForOurPod.Containers).To(gomega.HaveLen(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
-gomega.Expect(resourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
-gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
+gomega.Expect(resourcesForOurPod.Containers).To(gomega.HaveLen(2))
+for _, container := range resourcesForOurPod.Containers {
+if container.Name == pod1.Spec.InitContainers[1].Name {
+gomega.Expect(container.Devices).To(gomega.HaveLen(1))
+gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
+gomega.Expect(container.Devices[0].DeviceIds).To(gomega.HaveLen(1))
+} else if container.Name == pod1.Spec.Containers[0].Name {
+gomega.Expect(container.Devices).To(gomega.HaveLen(1))
+gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
+gomega.Expect(container.Devices[0].DeviceIds).To(gomega.HaveLen(1))
+} else {
+framework.Failf("unexpected container name: %s", container.Name)
+}
+}
})
})
}
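The assertions above presuppose a pod shaped roughly like the sketch below: two init containers, of which InitContainers[1] is the restartable one, plus one app container, each of the latter two holding one sample device. This is illustrative only; the names and the image variable are assumptions, not taken from this diff:

```go
restartAlways := v1.ContainerRestartPolicyAlways
pod1 := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "device-plugin-test"},
	Spec: v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		InitContainers: []v1.Container{
			// InitContainers[0]: a plain init container; it runs to
			// completion and is not reported by the PodResources API.
			{Name: "init", Image: busyboxImage, Command: []string{"sh", "-c", "sleep 5s"}},
			// InitContainers[1]: the restartable (sidecar) init container
			// the test inspects; it keeps running and holds one device.
			{
				Name:          "restartable-init",
				Image:         busyboxImage,
				Command:       []string{"sh", "-c", "sleep 1d"},
				RestartPolicy: &restartAlways,
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceName(SampleDeviceResourceName): resource.MustParse("1"),
					},
				},
			},
		},
		Containers: []v1.Container{
			// The regular app container, also holding one device.
			{
				Name:    "app",
				Image:   busyboxImage,
				Command: []string{"sh", "-c", "sleep 1d"},
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceName(SampleDeviceResourceName): resource.MustParse("1"),
					},
				},
			},
		},
	},
}
```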


@@ -62,6 +62,7 @@ type podDesc struct {
resourceName string
resourceAmount int
cpuRequest int // cpuRequest is in millicores
+initContainers []initContainerDesc
}

func (desc podDesc) CpuRequestQty() resource.Quantity {
@@ -86,6 +87,36 @@ func (desc podDesc) RequiresDevices() bool {
return desc.resourceName != "" && desc.resourceAmount > 0
}

type initContainerDesc struct {
cntName string
resourceName string
resourceAmount int
cpuRequest int // cpuRequest is in millicores
restartPolicy *v1.ContainerRestartPolicy
}
func (desc initContainerDesc) CPURequestQty() resource.Quantity {
qty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
return *qty
}
func (desc initContainerDesc) CPURequestExclusive() int {
if (desc.cpuRequest % 1000) != 0 {
// exclusive cpus are requested only if the quantity is integral;
// hence, explicitly rule out non-integral requests
return 0
}
return desc.cpuRequest / 1000
}
func (desc initContainerDesc) RequiresCPU() bool {
return desc.cpuRequest > 0
}
func (desc initContainerDesc) RequiresDevices() bool {
return desc.resourceName != "" && desc.resourceAmount > 0
}
func makePodResourcesTestPod(desc podDesc) *v1.Pod {
cnt := v1.Container{
Name: desc.cntName,
@@ -108,12 +139,44 @@ func makePodResourcesTestPod(desc podDesc) *v1.Pod {
cnt.Resources.Requests[v1.ResourceName(desc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", desc.resourceAmount))
cnt.Resources.Limits[v1.ResourceName(desc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", desc.resourceAmount))
}

var initCnts []v1.Container
for _, cntDesc := range desc.initContainers {
initCnt := v1.Container{
Name: cntDesc.cntName,
Image: busyboxImage,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{},
},
Command: []string{"sh", "-c", "sleep 5s"},
RestartPolicy: cntDesc.restartPolicy,
}
if cntDesc.restartPolicy != nil && *cntDesc.restartPolicy == v1.ContainerRestartPolicyAlways {
initCnt.Command = []string{"sh", "-c", "sleep 1d"}
}
if cntDesc.RequiresCPU() {
cpuRequestQty := cntDesc.CPURequestQty()
initCnt.Resources.Requests[v1.ResourceCPU] = cpuRequestQty
initCnt.Resources.Limits[v1.ResourceCPU] = cpuRequestQty
// we don't really care, we only need to be in guaranteed QoS
initCnt.Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi")
initCnt.Resources.Limits[v1.ResourceMemory] = resource.MustParse("100Mi")
}
if cntDesc.RequiresDevices() {
initCnt.Resources.Requests[v1.ResourceName(cntDesc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", cntDesc.resourceAmount))
initCnt.Resources.Limits[v1.ResourceName(cntDesc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", cntDesc.resourceAmount))
}
initCnts = append(initCnts, initCnt)
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: desc.podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
+InitContainers: initCnts,
Containers: []v1.Container{
cnt,
},
@@ -258,6 +321,59 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
}
}

// check init containers
for _, initCntDesc := range podReq.initContainers {
if initCntDesc.restartPolicy == nil || *initCntDesc.restartPolicy != v1.ContainerRestartPolicyAlways {
// If the init container is not restartable, we don't expect it
// to be reported.
_, ok := podInfo[initCntDesc.cntName]
if ok {
return fmt.Errorf("pod %q regular init container %q should not be reported", podReq.podName, initCntDesc.cntName)
}
continue
}
cntInfo, ok := podInfo[initCntDesc.cntName]
if !ok {
return fmt.Errorf("no container resources for pod %q container %q", podReq.podName, initCntDesc.cntName)
}
if initCntDesc.RequiresCPU() {
if exclusiveCpus := initCntDesc.CPURequestExclusive(); exclusiveCpus != len(cntInfo.CpuIds) {
if exclusiveCpus == 0 {
return fmt.Errorf("pod %q container %q requested %d expected to be allocated CPUs from shared pool %v", podReq.podName, initCntDesc.cntName, initCntDesc.cpuRequest, cntInfo.CpuIds)
}
return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, initCntDesc.cntName, exclusiveCpus, cntInfo.CpuIds)
}
}
if initCntDesc.RequiresDevices() {
dev := findContainerDeviceByName(cntInfo.GetDevices(), initCntDesc.resourceName)
if dev == nil {
return fmt.Errorf("pod %q container %q expected data for resource %q not found", podReq.podName, initCntDesc.cntName, initCntDesc.resourceName)
}
if len(dev.DeviceIds) != initCntDesc.resourceAmount {
return fmt.Errorf("pod %q container %q resource %q expected %d items got %v", podReq.podName, initCntDesc.cntName, initCntDesc.resourceName, initCntDesc.resourceAmount, dev.DeviceIds)
}
} else {
devs := cntInfo.GetDevices()
if len(devs) > 0 {
return fmt.Errorf("pod %q container %q expected no resources, got %v", podReq.podName, initCntDesc.cntName, devs)
}
}
if cnts, ok := found[defaultTopologyUnawareResourceName]; ok {
for _, cnt := range cnts {
for _, cd := range cnt.GetDevices() {
if cd.ResourceName != defaultTopologyUnawareResourceName {
continue
}
if cd.Topology != nil {
// we expect nil topology
return fmt.Errorf("Nil topology is expected")
}
}
}
}
}
}
return nil
}
@@ -283,7 +399,7 @@ func filterOutDesc(descs []podDesc, name string) []podDesc {
return ret
}

-func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData) {
+func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, sidecarContainersEnabled bool) {
var tpd *testPodData
var found podResMap
@@ -313,6 +429,7 @@ func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kube
cntName: "cnt-00",
},
}
+
tpd.createPodsForTest(ctx, f, expected)
expectPodResources(ctx, 1, cli, expected)
tpd.deletePodsForTest(ctx, f)
@@ -532,6 +649,73 @@ func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kube
expectPodResources(ctx, 1, cli, expected)
tpd.deletePodsForTest(ctx, f)

if sidecarContainersEnabled {
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
tpd = newTestPodData()
ginkgo.By("checking the output when pods have init containers")
if sd != nil {
expected = []podDesc{
{
podName: "pod-00",
cntName: "regular-00",
cpuRequest: 1000,
initContainers: []initContainerDesc{
{
cntName: "init-00",
resourceName: sd.resourceName,
resourceAmount: 1,
cpuRequest: 1000,
},
},
},
{
podName: "pod-01",
cntName: "regular-00",
cpuRequest: 1000,
initContainers: []initContainerDesc{
{
cntName: "restartable-init-00",
resourceName: sd.resourceName,
resourceAmount: 1,
cpuRequest: 1000,
restartPolicy: &containerRestartPolicyAlways,
},
},
},
}
} else {
expected = []podDesc{
{
podName: "pod-00",
cntName: "regular-00",
cpuRequest: 1000,
initContainers: []initContainerDesc{
{
cntName: "init-00",
cpuRequest: 1000,
},
},
},
{
podName: "pod-01",
cntName: "regular-00",
cpuRequest: 1000,
initContainers: []initContainerDesc{
{
cntName: "restartable-init-00",
cpuRequest: 1000,
restartPolicy: &containerRestartPolicyAlways,
},
},
},
}
}
tpd.createPodsForTest(ctx, f, expected)
expectPodResources(ctx, 1, cli, expected)
tpd.deletePodsForTest(ctx, f)
}
}

func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
@@ -573,7 +757,7 @@ func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpo
}
}

-func podresourcesGetTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient) {
+func podresourcesGetTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sidecarContainersEnabled bool) {
//var err error
ginkgo.By("checking the output when no pods are present")
expected := []podDesc{}
@@ -618,6 +802,39 @@ func podresourcesGetTests(ctx context.Context, f *framework.Framework, cli kubel
err = matchPodDescWithResources(expected, res)
framework.ExpectNoError(err, "matchPodDescWithResources() failed err %v", err)
tpd.deletePodsForTest(ctx, f)

if sidecarContainersEnabled {
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
tpd = newTestPodData()
ginkgo.By("checking the output when only pod with init containers require CPU")
expected = []podDesc{
{
podName: "pod-01",
cntName: "cnt-00",
cpuRequest: 2000,
initContainers: []initContainerDesc{
{
cntName: "init-00",
cpuRequest: 1000,
},
{
cntName: "restartable-init-01",
cpuRequest: 2000,
restartPolicy: &containerRestartPolicyAlways,
},
},
},
}
tpd.createPodsForTest(ctx, f, expected)
resp, err = cli.Get(ctx, &kubeletpodresourcesv1.GetPodResourcesRequest{PodName: "pod-01", PodNamespace: f.Namespace.Name})
framework.ExpectNoError(err, "Get() call failed for pod %s/%s", f.Namespace.Name, "pod-01")
podResourceList = []*kubeletpodresourcesv1.PodResources{resp.GetPodResources()}
res = convertToMap(podResourceList)
err = matchPodDescWithResources(expected, res)
framework.ExpectNoError(err, "matchPodDescWithResources() failed err %v", err)
tpd.deletePodsForTest(ctx, f)
}
}

// Serial because the test updates kubelet configuration.
@@ -676,7 +893,32 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
waitForSRIOVResources(ctx, f, sd)

ginkgo.By("checking List()")
-podresourcesListTests(ctx, f, cli, sd)
+podresourcesListTests(ctx, f, cli, sd, false)
+
+ginkgo.By("checking GetAllocatableResources()")
+podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs)
+})
+
+framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) {
+onlineCPUs, err := getOnlineCPUs()
+framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err)
+
+configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
+sd := setupSRIOVConfigOrFail(ctx, f, configMap)
+ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
+
+waitForSRIOVResources(ctx, f, sd)
+
+endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+framework.ExpectNoError(err, "LocalEndpoint() failed err: %v", err)
+
+cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+framework.ExpectNoError(err, "GetV1Client() failed err: %v", err)
+defer func() {
+framework.ExpectNoError(conn.Close())
+}()
+
+waitForSRIOVResources(ctx, f, sd)
+
+ginkgo.By("checking List()")
+podresourcesListTests(ctx, f, cli, sd, true)

ginkgo.By("checking GetAllocatableResources()")
podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs)
})
@@ -755,9 +997,27 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
framework.ExpectNoError(err, "GetV1Client() failed err: %v", err)
defer conn.Close()

-podresourcesListTests(ctx, f, cli, nil)
+podresourcesListTests(ctx, f, cli, nil, false)
podresourcesGetAllocatableResourcesTests(ctx, cli, nil, onlineCPUs, reservedSystemCPUs)
-podresourcesGetTests(ctx, f, cli)
+podresourcesGetTests(ctx, f, cli, false)
+})
+
+framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) {
+onlineCPUs, err := getOnlineCPUs()
+framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err)
+
+endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
+framework.ExpectNoError(err, "LocalEndpoint() failed err: %v", err)
+
+cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
+framework.ExpectNoError(err, "GetV1Client() failed err: %v", err)
+defer func() {
+framework.ExpectNoError(conn.Close())
+}()
+
+podresourcesListTests(ctx, f, cli, nil, true)
+podresourcesGetAllocatableResourcesTests(ctx, cli, nil, onlineCPUs, reservedSystemCPUs)
+podresourcesGetTests(ctx, f, cli, true)
})

ginkgo.It("should account for resources of pods in terminal phase", func(ctx context.Context) {
pd := podDesc{