From 5e039989910acb9b201f8c1f17e039edcca5563f Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Tue, 21 Feb 2023 08:48:06 +0100 Subject: [PATCH 1/6] kubelet: podresources: pack parameters in a struct To enable rate limiting, needed for GA graduation, we need to pass more parameters to the already crowded `ListenAndServePodresources` function. To tidy up a bit, pack the parameters in a helper struct, with no intended changes in behavior. Signed-off-by: Francesco Romani --- pkg/kubelet/apis/podresources/server_v1.go | 12 ++++++------ pkg/kubelet/apis/podresources/server_v1_test.go | 16 ++++++++++++++-- pkg/kubelet/apis/podresources/server_v1alpha1.go | 8 ++++---- .../apis/podresources/server_v1alpha1_test.go | 7 ++++++- pkg/kubelet/apis/podresources/types.go | 7 +++++++ pkg/kubelet/kubelet.go | 2 +- pkg/kubelet/server/server.go | 6 +++--- 7 files changed, 41 insertions(+), 17 deletions(-) diff --git a/pkg/kubelet/apis/podresources/server_v1.go b/pkg/kubelet/apis/podresources/server_v1.go index 20774a8cc11..9d7bd55243c 100644 --- a/pkg/kubelet/apis/podresources/server_v1.go +++ b/pkg/kubelet/apis/podresources/server_v1.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubelet/pkg/apis/podresources/v1" ) -// podResourcesServerV1alpha1 implements PodResourcesListerServer +// v1PodResourcesServer implements PodResourcesListerServer type v1PodResourcesServer struct { podsProvider PodsProvider devicesProvider DevicesProvider @@ -37,12 +37,12 @@ type v1PodResourcesServer struct { // NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider // with device information provided by the DevicesProvider -func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider, memoryProvider MemoryProvider) v1.PodResourcesListerServer { +func NewV1PodResourcesServer(providers PodResourcesProviders) v1.PodResourcesListerServer { return &v1PodResourcesServer{ - podsProvider: podsProvider, - 
devicesProvider: devicesProvider, - cpusProvider: cpusProvider, - memoryProvider: memoryProvider, + podsProvider: providers.Pods, + devicesProvider: providers.Devices, + cpusProvider: providers.Cpus, + memoryProvider: providers.Memory, } } diff --git a/pkg/kubelet/apis/podresources/server_v1_test.go b/pkg/kubelet/apis/podresources/server_v1_test.go index 1c09bbe5607..47816c8b640 100644 --- a/pkg/kubelet/apis/podresources/server_v1_test.go +++ b/pkg/kubelet/apis/podresources/server_v1_test.go @@ -172,7 +172,13 @@ func TestListPodResourcesV1(t *testing.T) { mockDevicesProvider.EXPECT().GetAllocatableDevices().Return([]*podresourcesapi.ContainerDevices{}).AnyTimes() mockMemoryProvider.EXPECT().GetAllocatableMemory().Return([]*podresourcesapi.ContainerMemory{}).AnyTimes() - server := NewV1PodResourcesServer(mockPodsProvider, mockDevicesProvider, mockCPUsProvider, mockMemoryProvider) + providers := PodResourcesProviders{ + Pods: mockPodsProvider, + Devices: mockDevicesProvider, + Cpus: mockCPUsProvider, + Memory: mockMemoryProvider, + } + server := NewV1PodResourcesServer(providers) resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{}) if err != nil { t.Errorf("want err = %v, got %q", nil, err) @@ -459,7 +465,13 @@ func TestAllocatableResources(t *testing.T) { mockCPUsProvider.EXPECT().GetAllocatableCPUs().Return(tc.allCPUs).AnyTimes() mockMemoryProvider.EXPECT().GetAllocatableMemory().Return(tc.allMemory).AnyTimes() - server := NewV1PodResourcesServer(mockPodsProvider, mockDevicesProvider, mockCPUsProvider, mockMemoryProvider) + providers := PodResourcesProviders{ + Pods: mockPodsProvider, + Devices: mockDevicesProvider, + Cpus: mockCPUsProvider, + Memory: mockMemoryProvider, + } + server := NewV1PodResourcesServer(providers) resp, err := server.GetAllocatableResources(context.TODO(), &podresourcesapi.AllocatableResourcesRequest{}) if err != nil { diff --git a/pkg/kubelet/apis/podresources/server_v1alpha1.go 
b/pkg/kubelet/apis/podresources/server_v1alpha1.go index baacd2722fb..3e029e3c402 100644 --- a/pkg/kubelet/apis/podresources/server_v1alpha1.go +++ b/pkg/kubelet/apis/podresources/server_v1alpha1.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubelet/pkg/apis/podresources/v1alpha1" ) -// podResourcesServerV1alpha1 implements PodResourcesListerServer +// v1alpha1PodResourcesServer implements PodResourcesListerServer type v1alpha1PodResourcesServer struct { podsProvider PodsProvider devicesProvider DevicesProvider @@ -33,10 +33,10 @@ type v1alpha1PodResourcesServer struct { // NewV1alpha1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider // with device information provided by the DevicesProvider -func NewV1alpha1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider) v1alpha1.PodResourcesListerServer { +func NewV1alpha1PodResourcesServer(providers PodResourcesProviders) v1alpha1.PodResourcesListerServer { return &v1alpha1PodResourcesServer{ - podsProvider: podsProvider, - devicesProvider: devicesProvider, + podsProvider: providers.Pods, + devicesProvider: providers.Devices, } } diff --git a/pkg/kubelet/apis/podresources/server_v1alpha1_test.go b/pkg/kubelet/apis/podresources/server_v1alpha1_test.go index 45399ba7c8b..1dc289d89b3 100644 --- a/pkg/kubelet/apis/podresources/server_v1alpha1_test.go +++ b/pkg/kubelet/apis/podresources/server_v1alpha1_test.go @@ -134,7 +134,12 @@ func TestListPodResourcesV1alpha1(t *testing.T) { mockDevicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(tc.devices).AnyTimes() mockDevicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes() - server := NewV1alpha1PodResourcesServer(mockPodsProvider, mockDevicesProvider) + + providers := PodResourcesProviders{ + Pods: mockPodsProvider, + Devices: mockDevicesProvider, + } + server := NewV1alpha1PodResourcesServer(providers) resp, err := server.List(context.TODO(), &v1alpha1.ListPodResourcesRequest{}) if 
err != nil { t.Errorf("want err = %v, got %q", nil, err) diff --git a/pkg/kubelet/apis/podresources/types.go b/pkg/kubelet/apis/podresources/types.go index b6011d12976..6b63b777c74 100644 --- a/pkg/kubelet/apis/podresources/types.go +++ b/pkg/kubelet/apis/podresources/types.go @@ -51,3 +51,10 @@ type MemoryProvider interface { // GetAllocatableMemory returns the allocatable memory from the node GetAllocatableMemory() []*podresourcesapi.ContainerMemory } + +type PodResourcesProviders struct { + Pods PodsProvider + Devices DevicesProvider + Cpus CPUsProvider + Memory MemoryProvider +} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 302c70df015..cec1e4d6f96 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -2752,7 +2752,7 @@ func (kl *Kubelet) ListenAndServePodResources() { return } - providers := server.PodResourcesProviders{ + providers := podresources.PodResourcesProviders{ Pods: kl.podManager, Devices: kl.containerManager, Cpus: kl.containerManager, diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 74c5dba9951..fc060cef4aa 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -218,11 +218,11 @@ type PodResourcesProviders struct { } // ListenAndServePodResources initializes a gRPC server to serve the PodResources service -func ListenAndServePodResources(socket string, providers PodResourcesProviders) { +func ListenAndServePodResources(socket string, providers podresources.PodResourcesProviders) { server := grpc.NewServer(podresourcesgrpc.WithRateLimiter(podresourcesgrpc.DefaultQPS, podresourcesgrpc.DefaultBurstTokens)) - podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(providers.Pods, providers.Devices)) - podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(providers.Pods, providers.Devices, providers.Cpus, providers.Memory)) + 
podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(providers)) + podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(providers)) l, err := util.CreateListener(socket) if err != nil { From 91234c701e36172d910b1495017b5030a3d7238a Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Fri, 17 Feb 2023 02:45:28 +0200 Subject: [PATCH 2/6] kube features: add KubeletPodResourcesDynamicResources and KubeletPodResourcesGet Signed-off-by: Moshe Levi --- pkg/features/kube_features.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 2a71f42928e..4ba330390b1 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -439,6 +439,18 @@ const ( // Enables the kubelet's pod resources grpc endpoint KubeletPodResources featuregate.Feature = "KubeletPodResources" + // owner: @moshe010 + // alpha: v1.27 + // + // Enable POD resources API to return resources allocated by Dynamic Resource Allocation + KubeletPodResourcesDynamicResources featuregate.Feature = "KubeletPodResourcesDynamicResources" + + // owner: @moshe010 + // alpha: v1.27 + // + // Enable POD resources API with Get method + KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet" + // owner: @fromanirh // alpha: v1.21 // beta: v1.23 @@ -988,6 +1000,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS KubeletPodResources: {Default: true, PreRelease: featuregate.Beta}, + KubeletPodResourcesDynamicResources: {Default: false, PreRelease: featuregate.Alpha}, + + KubeletPodResourcesGet: {Default: false, PreRelease: featuregate.Alpha}, + KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.Beta}, KubeletTracing: {Default: false, PreRelease: featuregate.Alpha}, From 71d6e4d53c43329a701391ee4d8bb528d762d28b Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Fri, 17 
Feb 2023 02:50:48 +0200 Subject: [PATCH 3/6] kubelet metrics: add pod resources get metrics Signed-off-by: Moshe Levi --- pkg/kubelet/metrics/metrics.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go index 7eeabcaf6e6..e0395d3292b 100644 --- a/pkg/kubelet/metrics/metrics.go +++ b/pkg/kubelet/metrics/metrics.go @@ -78,6 +78,8 @@ const ( PodResourcesEndpointRequestsGetAllocatableKey = "pod_resources_endpoint_requests_get_allocatable" PodResourcesEndpointErrorsListKey = "pod_resources_endpoint_errors_list" PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable" + PodResourcesEndpointRequestsGetKey = "pod_resources_endpoint_requests_get" + PodResourcesEndpointErrorsGetKey = "pod_resources_endpoint_errors_get" // Metrics keys for RuntimeClass RunPodSandboxDurationKey = "run_podsandbox_duration_seconds" @@ -441,6 +443,30 @@ var ( + // PodResourcesEndpointRequestsGetCount is a Counter that tracks the number of requests to the PodResource Get() endpoint. + // Broken down by server API version. + PodResourcesEndpointRequestsGetCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: PodResourcesEndpointRequestsGetKey, + Help: "Number of requests to the PodResource Get endpoint. Broken down by server api version.", + StabilityLevel: metrics.ALPHA, + }, + []string{"server_api_version"}, + ) + + // PodResourcesEndpointErrorsGetCount is a Counter that tracks the number of errors returned by the PodResource Get() endpoint. + // Broken down by server API version. + PodResourcesEndpointErrorsGetCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: PodResourcesEndpointErrorsGetKey, + Help: "Number of requests to the PodResource Get endpoint which returned error. 
Broken down by server api version.", + StabilityLevel: metrics.ALPHA, + }, + []string{"server_api_version"}, + ) + // RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations. // Broken down by RuntimeClass.Handler. RunPodSandboxDuration = metrics.NewHistogramVec( @@ -759,6 +785,10 @@ func Register(collectors ...metrics.StableCollector) { legacyregistry.MustRegister(PodResourcesEndpointErrorsListCount) legacyregistry.MustRegister(PodResourcesEndpointErrorsGetAllocatableCount) } + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGet) { + legacyregistry.MustRegister(PodResourcesEndpointRequestsGetCount) + legacyregistry.MustRegister(PodResourcesEndpointErrorsGetCount) + } } legacyregistry.MustRegister(StartedPodsTotal) legacyregistry.MustRegister(StartedPodsErrorsTotal) From 9c5761391261401c8c05fc213c0cf470baacbf94 Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Mon, 13 Mar 2023 16:40:49 +0200 Subject: [PATCH 4/6] Add ClassName to checkpoint state and in-memory cache Signed-off-by: Moshe Levi --- .../apis/podresources/server_v1alpha1_test.go | 1 - pkg/kubelet/cm/dra/claiminfo.go | 4 +++- pkg/kubelet/cm/dra/manager.go | 1 + pkg/kubelet/cm/dra/state/state_checkpoint.go | 3 +++ pkg/kubelet/cm/dra/state/state_checkpoint_test.go | 15 ++++++++++----- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/pkg/kubelet/apis/podresources/server_v1alpha1_test.go b/pkg/kubelet/apis/podresources/server_v1alpha1_test.go index 1dc289d89b3..adcb2fb2818 100644 --- a/pkg/kubelet/apis/podresources/server_v1alpha1_test.go +++ b/pkg/kubelet/apis/podresources/server_v1alpha1_test.go @@ -134,7 +134,6 @@ func TestListPodResourcesV1alpha1(t *testing.T) { mockDevicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(tc.devices).AnyTimes() mockDevicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes() - providers := PodResourcesProviders{ Pods: mockPodsProvider, Devices: 
mockDevicesProvider, diff --git a/pkg/kubelet/cm/dra/claiminfo.go b/pkg/kubelet/cm/dra/claiminfo.go index 89f1a68c884..318b0debf27 100644 --- a/pkg/kubelet/cm/dra/claiminfo.go +++ b/pkg/kubelet/cm/dra/claiminfo.go @@ -57,9 +57,10 @@ type claimInfoCache struct { claimInfo map[string]*claimInfo } -func newClaimInfo(driverName string, claimUID types.UID, claimName, namespace string, podUIDs sets.Set[string]) *claimInfo { +func newClaimInfo(driverName, className string, claimUID types.UID, claimName, namespace string, podUIDs sets.Set[string]) *claimInfo { claimInfoState := state.ClaimInfoState{ DriverName: driverName, + ClassName: className, ClaimUID: claimUID, ClaimName: claimName, Namespace: namespace, @@ -110,6 +111,7 @@ func newClaimInfoCache(stateDir, checkpointName string) (*claimInfoCache, error) for _, entry := range curState { info := newClaimInfo( entry.DriverName, + entry.ClassName, entry.ClaimUID, entry.ClaimName, entry.Namespace, diff --git a/pkg/kubelet/cm/dra/manager.go b/pkg/kubelet/cm/dra/manager.go index 3dfdb9cff75..b85853fc7c9 100644 --- a/pkg/kubelet/cm/dra/manager.go +++ b/pkg/kubelet/cm/dra/manager.go @@ -122,6 +122,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { // Create a claimInfo object to store the relevant claim info. 
claimInfo := newClaimInfo( resourceClaim.Status.DriverName, + resourceClaim.Spec.ResourceClassName, resourceClaim.UID, resourceClaim.Name, resourceClaim.Namespace, diff --git a/pkg/kubelet/cm/dra/state/state_checkpoint.go b/pkg/kubelet/cm/dra/state/state_checkpoint.go index 78da262bcb9..a3a310a27e0 100644 --- a/pkg/kubelet/cm/dra/state/state_checkpoint.go +++ b/pkg/kubelet/cm/dra/state/state_checkpoint.go @@ -39,6 +39,9 @@ type ClaimInfoState struct { // Name of the DRA driver DriverName string + // ClassName is a resource class of the claim + ClassName string + // ClaimUID is an UID of the resource claim ClaimUID types.UID diff --git a/pkg/kubelet/cm/dra/state/state_checkpoint_test.go b/pkg/kubelet/cm/dra/state/state_checkpoint_test.go index 0ed650d98e5..e21405aba2c 100644 --- a/pkg/kubelet/cm/dra/state/state_checkpoint_test.go +++ b/pkg/kubelet/cm/dra/state/state_checkpoint_test.go @@ -50,11 +50,12 @@ func TestCheckpointGetOrCreate(t *testing.T) { }, { "Restore checkpoint - single claim", - `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":1988120167}`, + `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":153446146}`, "", []ClaimInfoState{ { DriverName: "test-driver.cdi.k8s.io", + ClassName: "class-name", ClaimUID: "067798be-454e-4be4-9047-1aa06aea63f7", ClaimName: "example", Namespace: "default", @@ -67,11 +68,12 @@ func TestCheckpointGetOrCreate(t *testing.T) { }, { "Restore checkpoint - single claim - multiple devices", - 
`{"version":"v1","entries":[{"DriverName":"meta-test-driver.cdi.k8s.io","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver-1.cdi.k8s.io":["example-1.com/example-1=cdi-example-1"],"test-driver-2.cdi.k8s.io":["example-2.com/example-2=cdi-example-2"]}}],"checksum":2113538068}`, + `{"version":"v1","entries":[{"DriverName":"meta-test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver-1.cdi.k8s.io":["example-1.com/example-1=cdi-example-1"],"test-driver-2.cdi.k8s.io":["example-2.com/example-2=cdi-example-2"]}}],"checksum":1363630443}`, "", []ClaimInfoState{ { DriverName: "meta-test-driver.cdi.k8s.io", + ClassName: "class-name", ClaimUID: "067798be-454e-4be4-9047-1aa06aea63f7", ClaimName: "example", Namespace: "default", @@ -85,11 +87,12 @@ func TestCheckpointGetOrCreate(t *testing.T) { }, { "Restore checkpoint - multiple claims", - `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example-1","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-1"]}},{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"4cf8db2d-06c0-7d70-1a51-e59b25b2c16c","ClaimName":"example-2","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-2"]}}],"checksum":666680545}`, + 
`{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name-1","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example-1","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-1"]}},{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"4cf8db2d-06c0-7d70-1a51-e59b25b2c16c","ClassName":"class-name-2","ClaimName":"example-2","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-2"]}}],"checksum":1978566460}`, "", []ClaimInfoState{ { DriverName: "test-driver.cdi.k8s.io", + ClassName: "class-name-1", ClaimUID: "067798be-454e-4be4-9047-1aa06aea63f7", ClaimName: "example-1", Namespace: "default", @@ -100,6 +103,7 @@ func TestCheckpointGetOrCreate(t *testing.T) { }, { DriverName: "test-driver.cdi.k8s.io", + ClassName: "class-name-2", ClaimUID: "4cf8db2d-06c0-7d70-1a51-e59b25b2c16c", ClaimName: "example-2", Namespace: "default", @@ -112,7 +116,7 @@ func TestCheckpointGetOrCreate(t *testing.T) { }, { "Restore checkpoint - invalid checksum", - `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":1988120168}`, + `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":1988120168}`, "checkpoint is corrupted", []ClaimInfoState{}, }, @@ -167,6 +171,7 @@ func TestCheckpointGetOrCreate(t *testing.T) { func TestCheckpointStateStore(t 
*testing.T) { claimInfoState := ClaimInfoState{ DriverName: "test-driver.cdi.k8s.io", + ClassName: "class-name", ClaimUID: "067798be-454e-4be4-9047-1aa06aea63f7", ClaimName: "example", Namespace: "default", @@ -176,7 +181,7 @@ func TestCheckpointStateStore(t *testing.T) { }, } - expectedCheckpoint := `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":1988120167}` + expectedCheckpoint := `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":153446146}` // create temp dir testingDir, err := os.MkdirTemp("", "dramanager_state_test") From 2a568bcfc821edd46e43cd072ba2a23456c9605e Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Tue, 14 Mar 2023 01:34:54 +0200 Subject: [PATCH 5/6] kubelet podresources: extend List to support Dynamic Resources and implement Get API Signed-off-by: Moshe Levi --- pkg/kubelet/apis/podresources/server_v1.go | 64 +- .../podresources/testing/provider_mock.go | 52 + pkg/kubelet/apis/podresources/types.go | 17 +- pkg/kubelet/cm/container_manager.go | 3 +- pkg/kubelet/cm/container_manager_linux.go | 35 + pkg/kubelet/cm/container_manager_stub.go | 4 + pkg/kubelet/cm/container_manager_windows.go | 4 + pkg/kubelet/cm/dra/claiminfo.go | 22 +- pkg/kubelet/cm/dra/manager.go | 21 + pkg/kubelet/cm/dra/types.go | 3 + pkg/kubelet/cm/fake_container_manager.go | 4 + pkg/kubelet/kubelet.go | 9 +- .../pkg/apis/podresources/v1/api.pb.go | 1353 ++++++++++++++++- .../pkg/apis/podresources/v1/api.proto | 35 + 14 files changed, 1559 insertions(+), 
67 deletions(-) diff --git a/pkg/kubelet/apis/podresources/server_v1.go b/pkg/kubelet/apis/podresources/server_v1.go index 9d7bd55243c..ad6734ae729 100644 --- a/pkg/kubelet/apis/podresources/server_v1.go +++ b/pkg/kubelet/apis/podresources/server_v1.go @@ -29,20 +29,22 @@ import ( // v1PodResourcesServer implements PodResourcesListerServer type v1PodResourcesServer struct { - podsProvider PodsProvider - devicesProvider DevicesProvider - cpusProvider CPUsProvider - memoryProvider MemoryProvider + podsProvider PodsProvider + devicesProvider DevicesProvider + cpusProvider CPUsProvider + memoryProvider MemoryProvider + dynamicResourcesProvider DynamicResourcesProvider } // NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider // with device information provided by the DevicesProvider func NewV1PodResourcesServer(providers PodResourcesProviders) v1.PodResourcesListerServer { return &v1PodResourcesServer{ - podsProvider: providers.Pods, - devicesProvider: providers.Devices, - cpusProvider: providers.Cpus, - memoryProvider: providers.Memory, + podsProvider: providers.Pods, + devicesProvider: providers.Devices, + cpusProvider: providers.Cpus, + memoryProvider: providers.Memory, + dynamicResourcesProvider: providers.DynamicResources, } } @@ -69,6 +71,10 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name), Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name), } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) { + pRes.Containers[j].DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, &container) + } + } podResources[i] = &pRes } @@ -85,7 +91,7 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesGetAllocatable) { 
metrics.PodResourcesEndpointErrorsGetAllocatableCount.WithLabelValues("v1").Inc() - return nil, fmt.Errorf("Pod Resources API GetAllocatableResources disabled") + return nil, fmt.Errorf("PodResources API GetAllocatableResources disabled") } return &v1.AllocatableResourcesResponse{ @@ -94,3 +100,43 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req Memory: p.memoryProvider.GetAllocatableMemory(), }, nil } + +// Get returns information about the resources assigned to a specific pod +func (p *v1PodResourcesServer) Get(ctx context.Context, req *v1.GetPodResourcesRequest) (*v1.GetPodResourcesResponse, error) { + metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc() + metrics.PodResourcesEndpointRequestsGetCount.WithLabelValues("v1").Inc() + + if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesGet) { + metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc() + return nil, fmt.Errorf("PodResources API Get method disabled") + } + + pod, exist := p.podsProvider.GetPodByName(req.PodNamespace, req.PodName) + if !exist { + metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc() + return nil, fmt.Errorf("pod %s in namespace %s not found", req.PodName, req.PodNamespace) + } + + podResources := &v1.PodResources{ + Name: pod.Name, + Namespace: pod.Namespace, + Containers: make([]*v1.ContainerResources, len(pod.Spec.Containers)), + } + + for i, container := range pod.Spec.Containers { + podResources.Containers[i] = &v1.ContainerResources{ + Name: container.Name, + Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name), + CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name), + Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name), + } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) { + podResources.Containers[i].DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, &container) 
+ } + } + + response := &v1.GetPodResourcesResponse{ + PodResources: podResources, + } + return response, nil +} diff --git a/pkg/kubelet/apis/podresources/testing/provider_mock.go b/pkg/kubelet/apis/podresources/testing/provider_mock.go index 05b535523fe..4f436c1854a 100644 --- a/pkg/kubelet/apis/podresources/testing/provider_mock.go +++ b/pkg/kubelet/apis/podresources/testing/provider_mock.go @@ -114,6 +114,21 @@ func (m *MockPodsProvider) EXPECT() *MockPodsProviderMockRecorder { return m.recorder } +// GetPodByName mocks base method. +func (m *MockPodsProvider) GetPodByName(namespace, name string) (*v1.Pod, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodByName", namespace, name) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetPodByName indicates an expected call of GetPodByName. +func (mr *MockPodsProviderMockRecorder) GetPodByName(namespace, name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodByName", reflect.TypeOf((*MockPodsProvider)(nil).GetPodByName), namespace, name) +} + // GetPods mocks base method. func (m *MockPodsProvider) GetPods() []*v1.Pod { m.ctrl.T.Helper() @@ -229,3 +244,40 @@ func (mr *MockMemoryProviderMockRecorder) GetMemory(podUID, containerName interf mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemory", reflect.TypeOf((*MockMemoryProvider)(nil).GetMemory), podUID, containerName) } + +// MockDynamicResourcesProvider is a mock of DynamicResourcesProvider interface. +type MockDynamicResourcesProvider struct { + ctrl *gomock.Controller + recorder *MockDynamicResourcesProviderMockRecorder +} + +// MockDynamicResourcesProviderMockRecorder is the mock recorder for MockDynamicResourcesProvider. +type MockDynamicResourcesProviderMockRecorder struct { + mock *MockDynamicResourcesProvider +} + +// NewMockDynamicResourcesProvider creates a new mock instance. 
+func NewMockDynamicResourcesProvider(ctrl *gomock.Controller) *MockDynamicResourcesProvider { + mock := &MockDynamicResourcesProvider{ctrl: ctrl} + mock.recorder = &MockDynamicResourcesProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDynamicResourcesProvider) EXPECT() *MockDynamicResourcesProviderMockRecorder { + return m.recorder +} + +// GetDynamicResources mocks base method. +func (m *MockDynamicResourcesProvider) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*v10.DynamicResource { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDynamicResources", pod, container) + ret0, _ := ret[0].([]*v10.DynamicResource) + return ret0 +} + +// GetDynamicResources indicates an expected call of GetDynamicResources. +func (mr *MockDynamicResourcesProviderMockRecorder) GetDynamicResources(pod, container interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDynamicResources", reflect.TypeOf((*MockDynamicResourcesProvider)(nil).GetDynamicResources), pod, container) +} diff --git a/pkg/kubelet/apis/podresources/types.go b/pkg/kubelet/apis/podresources/types.go index 6b63b777c74..155d42abbcc 100644 --- a/pkg/kubelet/apis/podresources/types.go +++ b/pkg/kubelet/apis/podresources/types.go @@ -35,6 +35,7 @@ type DevicesProvider interface { // PodsProvider knows how to provide the pods admitted by the node type PodsProvider interface { GetPods() []*v1.Pod + GetPodByName(namespace, name string) (*v1.Pod, bool) } // CPUsProvider knows how to provide the cpus used by the given container @@ -52,9 +53,15 @@ type MemoryProvider interface { GetAllocatableMemory() []*podresourcesapi.ContainerMemory } -type PodResourcesProviders struct { - Pods PodsProvider - Devices DevicesProvider - Cpus CPUsProvider - Memory MemoryProvider +type DynamicResourcesProvider interface { + // GetDynamicResources returns information about dynamic resources 
assigned to pods and containers + GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource +} + +type PodResourcesProviders struct { + Pods PodsProvider + Devices DevicesProvider + Cpus CPUsProvider + Memory MemoryProvider + DynamicResources DynamicResourcesProvider } diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index b9aede1a3ff..80c1af0aa83 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -127,10 +127,11 @@ type ContainerManager interface { // might need to unprepare resources. PodMightNeedToUnprepareResources(UID types.UID) bool - // Implements the podresources Provider API for CPUs, Memory and Devices + // Implements the PodResources Provider API podresources.CPUsProvider podresources.DevicesProvider podresources.MemoryProvider + podresources.DynamicResourcesProvider } type NodeConfig struct { diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 9c1eb1713d3..02cb34ddcdc 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -965,6 +965,41 @@ func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.Contai return containerMemoryFromBlock(cm.memoryManager.GetAllocatableMemory()) } +func (cm *containerManagerImpl) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource { + if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) { + return []*podresourcesapi.DynamicResource{} + } + + var containerDynamicResources []*podresourcesapi.DynamicResource + containerClaimInfos, err := cm.draManager.GetContainerClaimInfos(pod, container) + if err != nil { + klog.ErrorS(err, "Unable to get container claim info state") + return []*podresourcesapi.DynamicResource{} + } + for _, containerClaimInfo := range containerClaimInfos { + var claimResources 
[]*podresourcesapi.ClaimResource + // TODO: Currently we maintain a list of ClaimResources, each of which contains + // a set of CDIDevices from a different kubelet plugin. In the future we may want to + // include the name of the kubelet plugin and/or other types of resources that are + // not CDIDevices (assuming the DRAmanager supports this). + for _, klPluginCdiDevices := range containerClaimInfo.CDIDevices { + var cdiDevices []*podresourcesapi.CDIDevice + for _, cdiDevice := range klPluginCdiDevices { + cdiDevices = append(cdiDevices, &podresourcesapi.CDIDevice{Name: cdiDevice}) + } + claimResources = append(claimResources, &podresourcesapi.ClaimResource{CDIDevices: cdiDevices}) + } + containerDynamicResource := podresourcesapi.DynamicResource{ + ClassName: containerClaimInfo.ClassName, + ClaimName: containerClaimInfo.ClaimName, + ClaimNamespace: containerClaimInfo.Namespace, + ClaimResources: claimResources, + } + containerDynamicResources = append(containerDynamicResources, &containerDynamicResource) + } + return containerDynamicResources +} + func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { return cm.deviceManager.ShouldResetExtendedResourceCapacity() } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 56176e22d9c..940695b63fa 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -159,6 +159,10 @@ func (cm *containerManagerStub) GetAllocatableMemory() []*podresourcesapi.Contai return nil } +func (cm *containerManagerStub) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource { + return nil +} + func (cm *containerManagerStub) GetNodeAllocatableAbsolute() v1.ResourceList { return nil } diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index a192341bf0b..c26a0683769 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ 
b/pkg/kubelet/cm/container_manager_windows.go @@ -253,6 +253,10 @@ func (cm *containerManagerImpl) GetNodeAllocatableAbsolute() v1.ResourceList { return nil } +func (cm *containerManagerImpl) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource { + return nil +} + func (cm *containerManagerImpl) PrepareDynamicResources(pod *v1.Pod) error { return nil } diff --git a/pkg/kubelet/cm/dra/claiminfo.go b/pkg/kubelet/cm/dra/claiminfo.go index 318b0debf27..e70e79b0386 100644 --- a/pkg/kubelet/cm/dra/claiminfo.go +++ b/pkg/kubelet/cm/dra/claiminfo.go @@ -26,9 +26,9 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) -// claimInfo holds information required +// ClaimInfo holds information required // to prepare and unprepare a resource claim. -type claimInfo struct { +type ClaimInfo struct { sync.RWMutex state.ClaimInfoState // annotations is a list of container annotations associated with @@ -36,14 +36,14 @@ type claimInfo struct { annotations []kubecontainer.Annotation } -func (res *claimInfo) addPodReference(podUID types.UID) { +func (res *ClaimInfo) addPodReference(podUID types.UID) { res.Lock() defer res.Unlock() res.PodUIDs.Insert(string(podUID)) } -func (res *claimInfo) deletePodReference(podUID types.UID) { +func (res *ClaimInfo) deletePodReference(podUID types.UID) { res.Lock() defer res.Unlock() @@ -54,10 +54,10 @@ func (res *claimInfo) deletePodReference(podUID types.UID) { type claimInfoCache struct { sync.RWMutex state state.CheckpointState - claimInfo map[string]*claimInfo + claimInfo map[string]*ClaimInfo } -func newClaimInfo(driverName, className string, claimUID types.UID, claimName, namespace string, podUIDs sets.Set[string]) *claimInfo { +func newClaimInfo(driverName, className string, claimUID types.UID, claimName, namespace string, podUIDs sets.Set[string]) *ClaimInfo { claimInfoState := state.ClaimInfoState{ DriverName: driverName, ClassName: className, @@ -66,13 +66,13 @@ func 
newClaimInfo(driverName, className string, claimUID types.UID, claimName, n Namespace: namespace, PodUIDs: podUIDs, } - claimInfo := claimInfo{ + claimInfo := ClaimInfo{ ClaimInfoState: claimInfoState, } return &claimInfo } -func (info *claimInfo) addCDIDevices(pluginName string, cdiDevices []string) error { +func (info *ClaimInfo) addCDIDevices(pluginName string, cdiDevices []string) error { // NOTE: Passing CDI device names as annotations is a temporary solution // It will be removed after all runtimes are updated // to get CDI device names from the ContainerConfig.CDIDevices field @@ -105,7 +105,7 @@ func newClaimInfoCache(stateDir, checkpointName string) (*claimInfoCache, error) cache := &claimInfoCache{ state: stateImpl, - claimInfo: make(map[string]*claimInfo), + claimInfo: make(map[string]*ClaimInfo), } for _, entry := range curState { @@ -129,14 +129,14 @@ func newClaimInfoCache(stateDir, checkpointName string) (*claimInfoCache, error) return cache, nil } -func (cache *claimInfoCache) add(res *claimInfo) { +func (cache *claimInfoCache) add(res *ClaimInfo) { cache.Lock() defer cache.Unlock() cache.claimInfo[res.ClaimName+res.Namespace] = res } -func (cache *claimInfoCache) get(claimName, namespace string) *claimInfo { +func (cache *claimInfoCache) get(claimName, namespace string) *ClaimInfo { cache.RLock() defer cache.RUnlock() diff --git a/pkg/kubelet/cm/dra/manager.go b/pkg/kubelet/cm/dra/manager.go index b85853fc7c9..efca1a718f3 100644 --- a/pkg/kubelet/cm/dra/manager.go +++ b/pkg/kubelet/cm/dra/manager.go @@ -289,3 +289,24 @@ func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error { func (m *ManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool { return m.cache.hasPodReference(UID) } + +// GetContainerClaimInfos gets Container's ClaimInfo +func (m *ManagerImpl) GetContainerClaimInfos(pod *v1.Pod, container *v1.Container) ([]*ClaimInfo, error) { + claimInfos := make([]*ClaimInfo, 0, len(pod.Spec.ResourceClaims)) + + for i, 
podResourceClaim := range pod.Spec.ResourceClaims { + claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) + + for _, claim := range container.Resources.Claims { + if podResourceClaim.Name != claim.Name { + continue + } + claimInfo := m.cache.get(claimName, pod.Namespace) + if claimInfo == nil { + return nil, fmt.Errorf("unable to get resource for namespace: %s, claim: %s", pod.Namespace, claimName) + } + claimInfos = append(claimInfos, claimInfo) + } + } + return claimInfos, nil +} diff --git a/pkg/kubelet/cm/dra/types.go b/pkg/kubelet/cm/dra/types.go index 7e559a06356..58c8ca0dd65 100644 --- a/pkg/kubelet/cm/dra/types.go +++ b/pkg/kubelet/cm/dra/types.go @@ -38,6 +38,9 @@ type Manager interface { // PodMightNeedToUnprepareResources returns true if the pod with the given UID // might need to unprepare resources. PodMightNeedToUnprepareResources(UID types.UID) bool + + // GetContainerClaimInfos gets Container ClaimInfo objects + GetContainerClaimInfos(pod *v1.Pod, container *v1.Container) ([]*ClaimInfo, error) } // ContainerInfo contains information required by the runtime to consume prepared resources. 
diff --git a/pkg/kubelet/cm/fake_container_manager.go b/pkg/kubelet/cm/fake_container_manager.go index 306a30aa9ed..8cf4d9b7c48 100644 --- a/pkg/kubelet/cm/fake_container_manager.go +++ b/pkg/kubelet/cm/fake_container_manager.go @@ -232,6 +232,10 @@ func (cm *FakeContainerManager) GetAllocatableMemory() []*podresourcesapi.Contai return nil } +func (cm *FakeContainerManager) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource { + return nil +} + func (cm *FakeContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList { cm.Lock() defer cm.Unlock() diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index cec1e4d6f96..fcf1f1f03c1 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -2753,10 +2753,11 @@ func (kl *Kubelet) ListenAndServePodResources() { } providers := podresources.PodResourcesProviders{ - Pods: kl.podManager, - Devices: kl.containerManager, - Cpus: kl.containerManager, - Memory: kl.containerManager, + Pods: kl.podManager, + Devices: kl.containerManager, + Cpus: kl.containerManager, + Memory: kl.containerManager, + DynamicResources: kl.containerManager, } server.ListenAndServePodResources(socket, providers) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go index e50e2e2bb02..5ca454588ce 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go @@ -296,6 +296,7 @@ type ContainerResources struct { Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"` CpuIds []int64 `protobuf:"varint,3,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"` Memory []*ContainerMemory `protobuf:"bytes,4,rep,name=memory,proto3" json:"memory,omitempty"` + DynamicResources []*DynamicResource `protobuf:"bytes,5,rep,name=dynamic_resources,json=dynamicResources,proto3" 
json:"dynamic_resources,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -360,6 +361,13 @@ func (m *ContainerResources) GetMemory() []*ContainerMemory { return nil } +func (m *ContainerResources) GetDynamicResources() []*DynamicResource { + if m != nil { + return m.DynamicResources + } + return nil +} + // ContainerMemory contains information about memory and hugepages assigned to a container type ContainerMemory struct { MemoryType string `protobuf:"bytes,1,opt,name=memory_type,json=memoryType,proto3" json:"memory_type,omitempty"` @@ -576,6 +584,272 @@ func (m *NUMANode) GetID() int64 { return 0 } +// DynamicResource contains information about the devices assigned to a container by DRA +type DynamicResource struct { + ClassName string `protobuf:"bytes,1,opt,name=class_name,json=className,proto3" json:"class_name,omitempty"` + ClaimName string `protobuf:"bytes,2,opt,name=claim_name,json=claimName,proto3" json:"claim_name,omitempty"` + ClaimNamespace string `protobuf:"bytes,3,opt,name=claim_namespace,json=claimNamespace,proto3" json:"claim_namespace,omitempty"` + ClaimResources []*ClaimResource `protobuf:"bytes,4,rep,name=claim_resources,json=claimResources,proto3" json:"claim_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicResource) Reset() { *m = DynamicResource{} } +func (*DynamicResource) ProtoMessage() {} +func (*DynamicResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{10} +} +func (m *DynamicResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DynamicResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DynamicResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DynamicResource) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_DynamicResource.Merge(m, src) +} +func (m *DynamicResource) XXX_Size() int { + return m.Size() +} +func (m *DynamicResource) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicResource.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicResource proto.InternalMessageInfo + +func (m *DynamicResource) GetClassName() string { + if m != nil { + return m.ClassName + } + return "" +} + +func (m *DynamicResource) GetClaimName() string { + if m != nil { + return m.ClaimName + } + return "" +} + +func (m *DynamicResource) GetClaimNamespace() string { + if m != nil { + return m.ClaimNamespace + } + return "" +} + +func (m *DynamicResource) GetClaimResources() []*ClaimResource { + if m != nil { + return m.ClaimResources + } + return nil +} + +// ClaimResource contains per plugin resource information +type ClaimResource struct { + CDIDevices []*CDIDevice `protobuf:"bytes,1,rep,name=cdi_devices,json=cdiDevices,proto3" json:"cdi_devices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClaimResource) Reset() { *m = ClaimResource{} } +func (*ClaimResource) ProtoMessage() {} +func (*ClaimResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{11} +} +func (m *ClaimResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClaimResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClaimResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClaimResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClaimResource.Merge(m, src) +} +func (m *ClaimResource) XXX_Size() int { + return m.Size() +} +func (m *ClaimResource) XXX_DiscardUnknown() { + xxx_messageInfo_ClaimResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClaimResource proto.InternalMessageInfo + +func 
(m *ClaimResource) GetCDIDevices() []*CDIDevice { + if m != nil { + return m.CDIDevices + } + return nil +} + +// CDIDevice specifies a CDI device information +type CDIDevice struct { + // Fully qualified CDI device name + // for example: vendor.com/gpu=gpudevice1 + // see more details in the CDI specification: + // https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CDIDevice) Reset() { *m = CDIDevice{} } +func (*CDIDevice) ProtoMessage() {} +func (*CDIDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{12} +} +func (m *CDIDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CDIDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CDIDevice.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CDIDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_CDIDevice.Merge(m, src) +} +func (m *CDIDevice) XXX_Size() int { + return m.Size() +} +func (m *CDIDevice) XXX_DiscardUnknown() { + xxx_messageInfo_CDIDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_CDIDevice proto.InternalMessageInfo + +func (m *CDIDevice) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// GetPodResourcesRequest contains information about the pod +type GetPodResourcesRequest struct { + PodName string `protobuf:"bytes,1,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + PodNamespace string `protobuf:"bytes,2,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPodResourcesRequest) Reset() { *m = 
GetPodResourcesRequest{} } +func (*GetPodResourcesRequest) ProtoMessage() {} +func (*GetPodResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{13} +} +func (m *GetPodResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetPodResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetPodResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetPodResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPodResourcesRequest.Merge(m, src) +} +func (m *GetPodResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetPodResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPodResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPodResourcesRequest proto.InternalMessageInfo + +func (m *GetPodResourcesRequest) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *GetPodResourcesRequest) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +// GetPodResourcesResponse contains information about the pod the devices +type GetPodResourcesResponse struct { + PodResources *PodResources `protobuf:"bytes,1,opt,name=pod_resources,json=podResources,proto3" json:"pod_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPodResourcesResponse) Reset() { *m = GetPodResourcesResponse{} } +func (*GetPodResourcesResponse) ProtoMessage() {} +func (*GetPodResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{14} +} +func (m *GetPodResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetPodResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
if deterministic { + return xxx_messageInfo_GetPodResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetPodResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPodResourcesResponse.Merge(m, src) +} +func (m *GetPodResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetPodResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPodResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPodResourcesResponse proto.InternalMessageInfo + +func (m *GetPodResourcesResponse) GetPodResources() *PodResources { + if m != nil { + return m.PodResources + } + return nil +} + func init() { proto.RegisterType((*AllocatableResourcesRequest)(nil), "v1.AllocatableResourcesRequest") proto.RegisterType((*AllocatableResourcesResponse)(nil), "v1.AllocatableResourcesResponse") @@ -587,48 +861,65 @@ func init() { proto.RegisterType((*ContainerDevices)(nil), "v1.ContainerDevices") proto.RegisterType((*TopologyInfo)(nil), "v1.TopologyInfo") proto.RegisterType((*NUMANode)(nil), "v1.NUMANode") + proto.RegisterType((*DynamicResource)(nil), "v1.DynamicResource") + proto.RegisterType((*ClaimResource)(nil), "v1.ClaimResource") + proto.RegisterType((*CDIDevice)(nil), "v1.CDIDevice") + proto.RegisterType((*GetPodResourcesRequest)(nil), "v1.GetPodResourcesRequest") + proto.RegisterType((*GetPodResourcesResponse)(nil), "v1.GetPodResourcesResponse") } func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xed, 0xda, 0xa1, 0x6d, 0xa6, 0x29, 0x54, 0x0b, 0xa2, 0x26, 0x4d, 0xdd, 0xc8, 0x1c, 0x88, - 0x04, 0xd8, 0x4a, 0x10, 0x88, 0x6b, 0x69, 0x10, 0x8a, 0x44, 0x23, 0xb0, 0xca, 0x85, 0x03, 0x91, - 0x63, 
0x6f, 0x83, 0x95, 0xc4, 0xbb, 0x78, 0xd7, 0x11, 0xe1, 0xc4, 0x81, 0x0f, 0xe0, 0xc0, 0x99, - 0xff, 0xe0, 0x0f, 0x7a, 0xe4, 0xc8, 0x91, 0x86, 0x1f, 0x41, 0xbb, 0x8e, 0x53, 0xa7, 0x49, 0x40, - 0x3d, 0x79, 0x76, 0xde, 0xcc, 0xf8, 0xcd, 0xbc, 0xd9, 0x85, 0xa2, 0xc7, 0x42, 0x9b, 0xc5, 0x54, - 0x50, 0xac, 0x8d, 0xea, 0xe5, 0x87, 0xbd, 0x50, 0xbc, 0x4f, 0xba, 0xb6, 0x4f, 0x87, 0x4e, 0x8f, - 0xf6, 0xa8, 0xa3, 0xa0, 0x6e, 0x72, 0xaa, 0x4e, 0xea, 0xa0, 0xac, 0x34, 0xc5, 0xda, 0x87, 0xbd, - 0xc3, 0xc1, 0x80, 0xfa, 0x9e, 0xf0, 0xba, 0x03, 0xe2, 0x12, 0x4e, 0x93, 0xd8, 0x27, 0xdc, 0x25, - 0x1f, 0x12, 0xc2, 0x85, 0xf5, 0x0d, 0x41, 0x65, 0x39, 0xce, 0x19, 0x8d, 0x38, 0xc1, 0x36, 0x6c, - 0x04, 0x64, 0x14, 0xfa, 0x84, 0x1b, 0xa8, 0xaa, 0xd7, 0xb6, 0x1a, 0xb7, 0xec, 0x51, 0xdd, 0x3e, - 0xa2, 0x91, 0xf0, 0xc2, 0x88, 0xc4, 0xcd, 0x14, 0x73, 0xb3, 0x20, 0xbc, 0x0b, 0x1b, 0x3e, 0x4b, - 0x3a, 0x61, 0xc0, 0x0d, 0xad, 0xaa, 0xd7, 0x74, 0x77, 0xdd, 0x67, 0x49, 0x2b, 0xe0, 0xf8, 0x3e, - 0xac, 0x0f, 0xc9, 0x90, 0xc6, 0x63, 0x43, 0x57, 0x75, 0x6e, 0xce, 0xd5, 0x39, 0x56, 0x90, 0x3b, - 0x0d, 0xb1, 0xee, 0xc0, 0xee, 0xcb, 0x90, 0x8b, 0x57, 0x34, 0x58, 0x60, 0xfc, 0x1a, 0x8c, 0x45, - 0x68, 0x4a, 0xf6, 0x31, 0x6c, 0x33, 0x1a, 0x74, 0xe2, 0x0c, 0x98, 0x52, 0xde, 0x91, 0xbf, 0x9a, - 0x4b, 0x28, 0xb1, 0xdc, 0xc9, 0xfa, 0x08, 0xa5, 0x3c, 0x8a, 0x31, 0x14, 0x22, 0x6f, 0x48, 0x0c, - 0x54, 0x45, 0xb5, 0xa2, 0xab, 0x6c, 0x5c, 0x81, 0xa2, 0xfc, 0x72, 0xe6, 0xf9, 0xc4, 0xd0, 0x14, - 0x70, 0xe1, 0xc0, 0x4f, 0x00, 0xfc, 0xac, 0x15, 0x3e, 0x6d, 0xf0, 0xf6, 0x5c, 0x83, 0x17, 0xff, - 0xce, 0x45, 0x5a, 0xdf, 0x11, 0xe0, 0xc5, 0x90, 0xa5, 0x04, 0x72, 0x42, 0x68, 0x57, 0x14, 0x42, - 0x5f, 0x21, 0x44, 0xe1, 0xff, 0x42, 0x08, 0xb8, 0x71, 0x09, 0xc2, 0x07, 0xb0, 0x95, 0x82, 0x1d, - 0x31, 0x66, 0x19, 0x47, 0x48, 0x5d, 0x27, 0x63, 0x46, 0x24, 0x7b, 0x1e, 0x7e, 0x4a, 0xa7, 0x54, - 0x70, 0x95, 0x8d, 0x1f, 0xc0, 0xa6, 0xa0, 0x8c, 0x0e, 0x68, 0x4f, 0xea, 0x8f, 0x32, 0x51, 0x4e, - 0xa6, 0xbe, 0x56, 0x74, 0x4a, 0xdd, 0x59, 
0x84, 0xf5, 0x05, 0xc1, 0xce, 0xe5, 0xce, 0xf0, 0x5d, - 0xd8, 0xce, 0x84, 0xed, 0xe4, 0xa6, 0x53, 0xca, 0x9c, 0x6d, 0x39, 0xa5, 0x7d, 0x80, 0x74, 0x00, - 0xb3, 0x0d, 0x2c, 0xba, 0xc5, 0xd4, 0x23, 0x7b, 0xbf, 0x1a, 0x8d, 0x06, 0x94, 0xf2, 0x08, 0xb6, - 0xe0, 0x5a, 0x44, 0x83, 0xd9, 0x5a, 0x95, 0x64, 0x6a, 0xfb, 0xcd, 0xf1, 0x61, 0x9b, 0x06, 0xc4, - 0x4d, 0x21, 0xab, 0x0c, 0x9b, 0x99, 0x0b, 0x5f, 0x07, 0xad, 0xd5, 0x54, 0x34, 0x75, 0x57, 0x6b, - 0x35, 0x1b, 0x3f, 0x10, 0xe0, 0xfc, 0xa2, 0xc9, 0x3d, 0x26, 0x31, 0x3e, 0x82, 0x82, 0xb4, 0xf0, - 0x9e, 0xac, 0xb7, 0x62, 0xed, 0xcb, 0x95, 0xe5, 0x60, 0xba, 0xf8, 0xd6, 0x1a, 0x7e, 0x07, 0xbb, - 0x2f, 0x88, 0x58, 0x76, 0x95, 0xf1, 0x81, 0x4c, 0xfd, 0xc7, 0x23, 0x50, 0xae, 0xae, 0x0e, 0xc8, - 0xea, 0x3f, 0x7b, 0x7e, 0x76, 0x6e, 0xa2, 0x5f, 0xe7, 0xe6, 0xda, 0xe7, 0x89, 0x89, 0xce, 0x26, - 0x26, 0xfa, 0x39, 0x31, 0xd1, 0xef, 0x89, 0x89, 0xbe, 0xfe, 0x31, 0xd7, 0xde, 0xde, 0xeb, 0x3f, - 0xe5, 0x76, 0x48, 0x9d, 0x7e, 0xd2, 0x25, 0x03, 0x22, 0x1c, 0xd6, 0xef, 0x39, 0x1e, 0x0b, 0xb9, - 0xc3, 0x68, 0x30, 0xbb, 0x8d, 0xce, 0xa8, 0xde, 0x5d, 0x57, 0xaf, 0xd2, 0xa3, 0xbf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xdd, 0x5e, 0x3b, 0x2f, 0xd5, 0x04, 0x00, 0x00, + // 760 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0x12, 0x4d, + 0x14, 0xee, 0xb0, 0xf4, 0x83, 0x53, 0xe8, 0xc7, 0xbc, 0x6f, 0x5a, 0x4a, 0x5b, 0x20, 0xdb, 0x8b, + 0x36, 0x51, 0x21, 0xad, 0xd1, 0x18, 0x2f, 0x4c, 0x3f, 0x30, 0x0d, 0x89, 0xad, 0x75, 0x53, 0x13, + 0xe3, 0x85, 0x64, 0xd9, 0x9d, 0xe2, 0xa4, 0xc0, 0x8c, 0xcc, 0x42, 0xc4, 0x2b, 0x2f, 0xfc, 0x01, + 0x5e, 0xf8, 0x53, 0xfc, 0x11, 0xbd, 0xf4, 0xd2, 0x2b, 0xd3, 0xe2, 0xcf, 0xf0, 0xc6, 0xcc, 0x0c, + 0xbb, 0x2c, 0xb0, 0xd8, 0xf4, 0x8a, 0x99, 0xf3, 0x3c, 0xe7, 0x70, 0xce, 0x73, 0xce, 0x9c, 0x85, + 0x84, 0xcd, 0x69, 0x81, 0xb7, 0x98, 0xc7, 0x70, 0xac, 0xb3, 0x9b, 0x79, 0x50, 0xa3, 0xde, 0xfb, + 0x76, 0xb5, 0xe0, 0xb0, 0x46, 0xb1, 0xc6, 0x6a, 0xac, 0xa8, 
0xa0, 0x6a, 0xfb, 0x42, 0xdd, 0xd4, + 0x45, 0x9d, 0xb4, 0x8b, 0xb9, 0x09, 0xeb, 0x07, 0xf5, 0x3a, 0x73, 0x6c, 0xcf, 0xae, 0xd6, 0x89, + 0x45, 0x04, 0x6b, 0xb7, 0x1c, 0x22, 0x2c, 0xf2, 0xa1, 0x4d, 0x84, 0x67, 0x7e, 0x43, 0xb0, 0x11, + 0x8d, 0x0b, 0xce, 0x9a, 0x82, 0xe0, 0x02, 0xcc, 0xba, 0xa4, 0x43, 0x1d, 0x22, 0xd2, 0x28, 0x6f, + 0xec, 0xcc, 0xef, 0xfd, 0x5f, 0xe8, 0xec, 0x16, 0x8e, 0x58, 0xd3, 0xb3, 0x69, 0x93, 0xb4, 0x4a, + 0x1a, 0xb3, 0x7c, 0x12, 0x5e, 0x85, 0x59, 0x87, 0xb7, 0x2b, 0xd4, 0x15, 0xe9, 0x58, 0xde, 0xd8, + 0x31, 0xac, 0x19, 0x87, 0xb7, 0xcb, 0xae, 0xc0, 0xf7, 0x60, 0xa6, 0x41, 0x1a, 0xac, 0xd5, 0x4d, + 0x1b, 0x2a, 0xce, 0x7f, 0x43, 0x71, 0x4e, 0x14, 0x64, 0xf5, 0x29, 0xe6, 0x1a, 0xac, 0xbe, 0xa0, + 0xc2, 0x3b, 0x63, 0xee, 0x58, 0xc6, 0xaf, 0x20, 0x3d, 0x0e, 0xf5, 0x93, 0x7d, 0x04, 0x29, 0xce, + 0xdc, 0x4a, 0xcb, 0x07, 0xfa, 0x29, 0x2f, 0xc9, 0xbf, 0x1a, 0x72, 0x48, 0xf2, 0xd0, 0xcd, 0xfc, + 0x08, 0xc9, 0x30, 0x8a, 0x31, 0xc4, 0x9b, 0x76, 0x83, 0xa4, 0x51, 0x1e, 0xed, 0x24, 0x2c, 0x75, + 0xc6, 0x1b, 0x90, 0x90, 0xbf, 0x82, 0xdb, 0x0e, 0x49, 0xc7, 0x14, 0x30, 0x30, 0xe0, 0xc7, 0x00, + 0x8e, 0x5f, 0x8a, 0xe8, 0x17, 0xb8, 0x32, 0x54, 0xe0, 0xe0, 0xbf, 0x43, 0x4c, 0xf3, 0x1a, 0x01, + 0x1e, 0xa7, 0x44, 0x26, 0x10, 0x6a, 0x44, 0xec, 0x8e, 0x8d, 0x30, 0x26, 0x34, 0x22, 0x7e, 0x6b, + 0x23, 0xf0, 0x3e, 0x2c, 0xbb, 0xdd, 0xa6, 0xdd, 0xa0, 0x4e, 0x48, 0xd5, 0xe9, 0x81, 0x5f, 0x49, + 0x83, 0x7e, 0xea, 0xd6, 0x92, 0x3b, 0x6c, 0x10, 0xa6, 0x07, 0x8b, 0x23, 0xc1, 0x71, 0x0e, 0xe6, + 0x75, 0xf8, 0x8a, 0xd7, 0xe5, 0x7e, 0x95, 0xa0, 0x4d, 0xe7, 0x5d, 0x4e, 0x64, 0xfd, 0x82, 0x7e, + 0xd2, 0x3a, 0xc7, 0x2d, 0x75, 0xc6, 0xf7, 0x61, 0xce, 0x63, 0x9c, 0xd5, 0x59, 0x4d, 0x4e, 0x10, + 0xf2, 0xdb, 0x7a, 0xde, 0xb7, 0x95, 0x9b, 0x17, 0xcc, 0x0a, 0x18, 0xe6, 0x17, 0x04, 0x4b, 0xa3, + 0xda, 0xe0, 0x2d, 0x48, 0xf9, 0x45, 0x54, 0x42, 0xfa, 0x26, 0x7d, 0xe3, 0xa9, 0xd4, 0x79, 0x13, + 0x40, 0x4b, 0x18, 0xcc, 0x70, 0xc2, 0x4a, 0x68, 0x8b, 0x54, 0xef, 0x6e, 0x69, 0xec, 0x41, 0x32, + 
0x8c, 0x60, 0x13, 0xa6, 0x9b, 0xcc, 0x0d, 0x06, 0x33, 0x29, 0x5d, 0x4f, 0x5f, 0x9f, 0x1c, 0x9c, + 0x32, 0x97, 0x58, 0x1a, 0x32, 0x33, 0x30, 0xe7, 0x9b, 0xf0, 0x02, 0xc4, 0xca, 0x25, 0x95, 0xa6, + 0x61, 0xc5, 0xca, 0x25, 0xf3, 0x3b, 0x82, 0xc5, 0x11, 0xc9, 0x65, 0xc2, 0x4e, 0xdd, 0x16, 0x22, + 0x5c, 0x52, 0x42, 0x59, 0xfc, 0x7a, 0x9c, 0xba, 0x4d, 0x1b, 0x1a, 0x8e, 0x05, 0x30, 0x6d, 0x28, + 0x78, 0x1b, 0x16, 0x07, 0xb0, 0x9e, 0x6e, 0x43, 0x71, 0x16, 0x02, 0x8e, 0x1e, 0xf1, 0xa7, 0x3e, + 0x71, 0x30, 0x07, 0x7a, 0x7e, 0x96, 0xd5, 0xfc, 0x48, 0x28, 0x98, 0x02, 0xed, 0x3b, 0x98, 0x81, + 0x97, 0x90, 0x1a, 0x22, 0xe0, 0x67, 0x30, 0xef, 0xb8, 0xb4, 0x32, 0xbc, 0x59, 0x52, 0x2a, 0x50, + 0xa9, 0xac, 0xdb, 0x75, 0xb8, 0xd0, 0xfb, 0x95, 0x83, 0xe0, 0x2a, 0xdf, 0x8d, 0x4b, 0xfb, 0x67, + 0x33, 0x07, 0x89, 0x00, 0x89, 0x7a, 0x2d, 0xe6, 0x1b, 0x58, 0x39, 0x26, 0x51, 0xfb, 0x03, 0xaf, + 0xc1, 0x9c, 0xdc, 0x11, 0x21, 0x8f, 0x59, 0xce, 0x5c, 0xa5, 0xc5, 0x96, 0x5e, 0x1f, 0xa3, 0xef, + 0x3c, 0xd9, 0xc7, 0x95, 0xcd, 0x3c, 0x83, 0xd5, 0xb1, 0xc8, 0x93, 0xd7, 0x0f, 0xba, 0x7d, 0xfd, + 0xec, 0xfd, 0x41, 0x80, 0xc3, 0xb0, 0x5c, 0x6f, 0xa4, 0x85, 0x8f, 0x20, 0x2e, 0x4f, 0x78, 0x5d, + 0xba, 0x4f, 0xd8, 0x86, 0x99, 0x8d, 0x68, 0x50, 0x27, 0x64, 0x4e, 0xe1, 0x77, 0x2a, 0xdb, 0xa8, + 0x0d, 0x8f, 0x73, 0xd2, 0xf5, 0x1f, 0xdf, 0x86, 0x4c, 0x7e, 0x32, 0x21, 0x88, 0xbf, 0x0f, 0xc6, + 0x31, 0xf1, 0x70, 0x46, 0x52, 0xa3, 0x05, 0xcf, 0xac, 0x47, 0x62, 0x7e, 0x84, 0xc3, 0xe7, 0x57, + 0x37, 0x59, 0xf4, 0xf3, 0x26, 0x3b, 0xf5, 0xb9, 0x97, 0x45, 0x57, 0xbd, 0x2c, 0xfa, 0xd1, 0xcb, + 0xa2, 0xeb, 0x5e, 0x16, 0x7d, 0xfd, 0x9d, 0x9d, 0x7a, 0xbb, 0x7d, 0xf9, 0x44, 0x14, 0x28, 0x2b, + 0x5e, 0xb6, 0xab, 0xa4, 0x4e, 0xbc, 0x22, 0xbf, 0xac, 0x15, 0x6d, 0x4e, 0x45, 0x91, 0x33, 0x37, + 0xd0, 0xb9, 0xd8, 0xd9, 0xad, 0xce, 0xa8, 0xcf, 0xdd, 0xc3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x49, 0xac, 0x87, 0x00, 0x2e, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -645,6 +936,7 @@ const _ = grpc.SupportPackageIsVersion4 type PodResourcesListerClient interface { List(ctx context.Context, in *ListPodResourcesRequest, opts ...grpc.CallOption) (*ListPodResourcesResponse, error) GetAllocatableResources(ctx context.Context, in *AllocatableResourcesRequest, opts ...grpc.CallOption) (*AllocatableResourcesResponse, error) + Get(ctx context.Context, in *GetPodResourcesRequest, opts ...grpc.CallOption) (*GetPodResourcesResponse, error) } type podResourcesListerClient struct { @@ -673,10 +965,20 @@ func (c *podResourcesListerClient) GetAllocatableResources(ctx context.Context, return out, nil } +func (c *podResourcesListerClient) Get(ctx context.Context, in *GetPodResourcesRequest, opts ...grpc.CallOption) (*GetPodResourcesResponse, error) { + out := new(GetPodResourcesResponse) + err := c.cc.Invoke(ctx, "/v1.PodResourcesLister/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // PodResourcesListerServer is the server API for PodResourcesLister service. type PodResourcesListerServer interface { List(context.Context, *ListPodResourcesRequest) (*ListPodResourcesResponse, error) GetAllocatableResources(context.Context, *AllocatableResourcesRequest) (*AllocatableResourcesResponse, error) + Get(context.Context, *GetPodResourcesRequest) (*GetPodResourcesResponse, error) } // UnimplementedPodResourcesListerServer can be embedded to have forward compatible implementations. 
@@ -689,6 +991,9 @@ func (*UnimplementedPodResourcesListerServer) List(ctx context.Context, req *Lis func (*UnimplementedPodResourcesListerServer) GetAllocatableResources(ctx context.Context, req *AllocatableResourcesRequest) (*AllocatableResourcesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetAllocatableResources not implemented") } +func (*UnimplementedPodResourcesListerServer) Get(ctx context.Context, req *GetPodResourcesRequest) (*GetPodResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} func RegisterPodResourcesListerServer(s *grpc.Server, srv PodResourcesListerServer) { s.RegisterService(&_PodResourcesLister_serviceDesc, srv) @@ -730,6 +1035,24 @@ func _PodResourcesLister_GetAllocatableResources_Handler(srv interface{}, ctx co return interceptor(ctx, in, info, handler) } +func _PodResourcesLister_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPodResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PodResourcesListerServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1.PodResourcesLister/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PodResourcesListerServer).Get(ctx, req.(*GetPodResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _PodResourcesLister_serviceDesc = grpc.ServiceDesc{ ServiceName: "v1.PodResourcesLister", HandlerType: (*PodResourcesListerServer)(nil), @@ -742,6 +1065,10 @@ var _PodResourcesLister_serviceDesc = grpc.ServiceDesc{ MethodName: "GetAllocatableResources", Handler: _PodResourcesLister_GetAllocatableResources_Handler, }, + { + MethodName: "Get", + Handler: _PodResourcesLister_Get_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "api.proto", @@ -971,6 
+1298,20 @@ func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.DynamicResources) > 0 { + for iNdEx := len(m.DynamicResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DynamicResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Memory) > 0 { for iNdEx := len(m.Memory) - 1; iNdEx >= 0; iNdEx-- { { @@ -1191,6 +1532,203 @@ func (m *NUMANode) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DynamicResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DynamicResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClaimResources) > 0 { + for iNdEx := len(m.ClaimResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClaimResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ClaimNamespace) > 0 { + i -= len(m.ClaimNamespace) + copy(dAtA[i:], m.ClaimNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimNamespace))) + i-- + dAtA[i] = 0x1a + } + if len(m.ClaimName) > 0 { + i -= len(m.ClaimName) + copy(dAtA[i:], m.ClaimName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClassName) > 0 { + i -= len(m.ClassName) + copy(dAtA[i:], m.ClassName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ClassName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClaimResource) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClaimResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClaimResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CDIDevices) > 0 { + for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CDIDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CDIDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CDIDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CDIDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetPodResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPodResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetPodResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = 
encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetPodResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPodResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetPodResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PodResources != nil { + { + size, err := m.PodResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintApi(dAtA []byte, offset int, v uint64) int { offset -= sovApi(v) base := offset @@ -1315,6 +1853,12 @@ func (m *ContainerResources) Size() (n int) { n += 1 + l + sovApi(uint64(l)) } } + if len(m.DynamicResources) > 0 { + for _, e := range m.DynamicResources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } return n } @@ -1388,6 +1932,91 @@ func (m *NUMANode) Size() (n int) { return n } +func (m *DynamicResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClassName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ClaimName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ClaimNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.ClaimResources) > 0 { + for _, e := range m.ClaimResources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ClaimResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ if len(m.CDIDevices) > 0 { + for _, e := range m.CDIDevices { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *CDIDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *GetPodResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *GetPodResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodResources != nil { + l = m.PodResources.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + func sovApi(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1480,11 +2109,17 @@ func (this *ContainerResources) String() string { repeatedStringForMemory += strings.Replace(f.String(), "ContainerMemory", "ContainerMemory", 1) + "," } repeatedStringForMemory += "}" + repeatedStringForDynamicResources := "[]*DynamicResource{" + for _, f := range this.DynamicResources { + repeatedStringForDynamicResources += strings.Replace(f.String(), "DynamicResource", "DynamicResource", 1) + "," + } + repeatedStringForDynamicResources += "}" s := strings.Join([]string{`&ContainerResources{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Devices:` + repeatedStringForDevices + `,`, `CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`, `Memory:` + repeatedStringForMemory + `,`, + `DynamicResources:` + repeatedStringForDynamicResources + `,`, `}`, }, "") return s @@ -1538,6 +2173,70 @@ func (this *NUMANode) String() string { }, "") return s } +func (this *DynamicResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForClaimResources := "[]*ClaimResource{" + for _, f := range this.ClaimResources { + repeatedStringForClaimResources += strings.Replace(f.String(), 
"ClaimResource", "ClaimResource", 1) + "," + } + repeatedStringForClaimResources += "}" + s := strings.Join([]string{`&DynamicResource{`, + `ClassName:` + fmt.Sprintf("%v", this.ClassName) + `,`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ClaimNamespace:` + fmt.Sprintf("%v", this.ClaimNamespace) + `,`, + `ClaimResources:` + repeatedStringForClaimResources + `,`, + `}`, + }, "") + return s +} +func (this *ClaimResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForCDIDevices := "[]*CDIDevice{" + for _, f := range this.CDIDevices { + repeatedStringForCDIDevices += strings.Replace(f.String(), "CDIDevice", "CDIDevice", 1) + "," + } + repeatedStringForCDIDevices += "}" + s := strings.Join([]string{`&ClaimResource{`, + `CDIDevices:` + repeatedStringForCDIDevices + `,`, + `}`, + }, "") + return s +} +func (this *CDIDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CDIDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *GetPodResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetPodResourcesRequest{`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GetPodResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetPodResourcesResponse{`, + `PodResources:` + strings.Replace(this.PodResources.String(), "PodResources", "PodResources", 1) + `,`, + `}`, + }, "") + return s +} func valueToStringApi(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2277,6 +2976,40 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DynamicResources", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DynamicResources = append(m.DynamicResources, &DynamicResource{}) + if err := m.DynamicResources[len(m.DynamicResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -2738,6 +3471,552 @@ func (m *NUMANode) Unmarshal(dAtA []byte) error { } return nil } +func (m *DynamicResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DynamicResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DynamicResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimResources = append(m.ClaimResources, &ClaimResource{}) + if err := m.ClaimResources[len(m.ClaimResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClaimResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClaimResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClaimResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CDIDevices = append(m.CDIDevices, &CDIDevice{}) + if err := m.CDIDevices[len(m.CDIDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CDIDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CDIDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CDIDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPodResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPodResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPodResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPodResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPodResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPodResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodResources == nil { + m.PodResources = &PodResources{} + } + if err := m.PodResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipApi(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto index 751dec2a71e..9e6022ac32c 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto @@ -20,6 +20,7 @@ option (gogoproto.goproto_unrecognized_all) = false; service PodResourcesLister { rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} + rpc Get(GetPodResourcesRequest) returns (GetPodResourcesResponse) {} } message AllocatableResourcesRequest {} @@ -52,6 +53,7 @@ message ContainerResources { repeated ContainerDevices devices = 2; repeated int64 cpu_ids = 3; repeated ContainerMemory memory = 4; + repeated DynamicResource dynamic_resources = 5; } // ContainerMemory contains information about memory and hugepages assigned to a container @@ -77,3 +79,36 @@ message TopologyInfo { message NUMANode { int64 ID = 1; } + +// DynamicResource contains information about the devices assigned to a container by DRA +message DynamicResource { + string class_name = 1; + string claim_name = 2; + string claim_namespace = 3; + repeated ClaimResource claim_resources = 4; +} + +// ClaimResource contains per plugin resource information +message ClaimResource { + repeated CDIDevice cdi_devices = 1 [(gogoproto.customname) = "CDIDevices"]; +} + +// CDIDevice specifies a CDI device information +message CDIDevice { + // Fully qualified CDI device name + // for example: vendor.com/gpu=gpudevice1 + // see more details in the CDI specification: + // 
https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md + string name = 1; +} + +// GetPodResourcesRequest contains information about the pod +message GetPodResourcesRequest { + string pod_name = 1; + string pod_namespace = 2; +} + +// GetPodResourcesResponse contains information about the pod the devices +message GetPodResourcesResponse { + PodResources pod_resources = 1; +} From 67a71c0bd763d5cea66ddc59083f65d73711325e Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Thu, 9 Mar 2023 02:03:00 +0200 Subject: [PATCH 6/6] kubelet podresources: add unit tests for DyanmicResource and Get method Signed-off-by: Moshe Levi --- .../apis/podresources/server_v1_test.go | 416 +++++++++++++++--- 1 file changed, 366 insertions(+), 50 deletions(-) diff --git a/pkg/kubelet/apis/podresources/server_v1_test.go b/pkg/kubelet/apis/podresources/server_v1_test.go index 47816c8b640..8688876a108 100644 --- a/pkg/kubelet/apis/podresources/server_v1_test.go +++ b/pkg/kubelet/apis/podresources/server_v1_test.go @@ -18,6 +18,7 @@ package podresources import ( "context" + "fmt" "reflect" "sort" "testing" @@ -66,12 +67,41 @@ func TestListPodResourcesV1(t *testing.T) { }, } + containers := []v1.Container{ + { + Name: containerName, + }, + } + pods := []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: podNamespace, + UID: podUID, + }, + Spec: v1.PodSpec{ + Containers: containers, + }, + }, + } + + pluginCDIDevices := []*podresourcesapi.CDIDevice{{Name: "dra-dev0"}, {Name: "dra-dev1"}} + draDevs := []*podresourcesapi.DynamicResource{ + { + ClassName: "resource-class", + ClaimName: "claim-name", + ClaimNamespace: "default", + ClaimResources: []*podresourcesapi.ClaimResource{{CDIDevices: pluginCDIDevices}}, + }, + } + for _, tc := range []struct { desc string pods []*v1.Pod devices []*podresourcesapi.ContainerDevices cpus []int64 memory []*podresourcesapi.ContainerMemory + dynamicResources []*podresourcesapi.DynamicResource 
expectedResponse *podresourcesapi.ListPodResourcesResponse }{ { @@ -80,29 +110,16 @@ func TestListPodResourcesV1(t *testing.T) { devices: []*podresourcesapi.ContainerDevices{}, cpus: []int64{}, memory: []*podresourcesapi.ContainerMemory{}, + dynamicResources: []*podresourcesapi.DynamicResource{}, expectedResponse: &podresourcesapi.ListPodResourcesResponse{}, }, { - desc: "pod without devices", - pods: []*v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: podNamespace, - UID: podUID, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: containerName, - }, - }, - }, - }, - }, - devices: []*podresourcesapi.ContainerDevices{}, - cpus: []int64{}, - memory: []*podresourcesapi.ContainerMemory{}, + desc: "pod without devices", + pods: pods, + devices: []*podresourcesapi.ContainerDevices{}, + cpus: []int64{}, + memory: []*podresourcesapi.ContainerMemory{}, + dynamicResources: []*podresourcesapi.DynamicResource{}, expectedResponse: &podresourcesapi.ListPodResourcesResponse{ PodResources: []*podresourcesapi.PodResources{ { @@ -110,8 +127,9 @@ func TestListPodResourcesV1(t *testing.T) { Namespace: podNamespace, Containers: []*podresourcesapi.ContainerResources{ { - Name: containerName, - Devices: []*podresourcesapi.ContainerDevices{}, + Name: containerName, + Devices: []*podresourcesapi.ContainerDevices{}, + DynamicResources: []*podresourcesapi.DynamicResource{}, }, }, }, @@ -119,26 +137,12 @@ func TestListPodResourcesV1(t *testing.T) { }, }, { - desc: "pod with devices", - pods: []*v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: podNamespace, - UID: podUID, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: containerName, - }, - }, - }, - }, - }, - devices: devs, - cpus: cpus, - memory: memory, + desc: "pod with devices", + pods: pods, + devices: devs, + cpus: cpus, + memory: memory, + dynamicResources: []*podresourcesapi.DynamicResource{}, expectedResponse: 
&podresourcesapi.ListPodResourcesResponse{ PodResources: []*podresourcesapi.PodResources{ { @@ -146,10 +150,61 @@ func TestListPodResourcesV1(t *testing.T) { Namespace: podNamespace, Containers: []*podresourcesapi.ContainerResources{ { - Name: containerName, - Devices: devs, - CpuIds: cpus, - Memory: memory, + Name: containerName, + Devices: devs, + CpuIds: cpus, + Memory: memory, + DynamicResources: []*podresourcesapi.DynamicResource{}, + }, + }, + }, + }, + }, + }, + { + desc: "pod with dynamic resources", + pods: pods, + devices: []*podresourcesapi.ContainerDevices{}, + cpus: cpus, + memory: memory, + dynamicResources: draDevs, + expectedResponse: &podresourcesapi.ListPodResourcesResponse{ + PodResources: []*podresourcesapi.PodResources{ + { + Name: podName, + Namespace: podNamespace, + Containers: []*podresourcesapi.ContainerResources{ + { + Name: containerName, + Devices: []*podresourcesapi.ContainerDevices{}, + CpuIds: cpus, + Memory: memory, + DynamicResources: draDevs, + }, + }, + }, + }, + }, + }, + { + desc: "pod with dynamic resources and devices", + pods: pods, + devices: devs, + cpus: cpus, + memory: memory, + dynamicResources: draDevs, + expectedResponse: &podresourcesapi.ListPodResourcesResponse{ + PodResources: []*podresourcesapi.PodResources{ + { + Name: podName, + Namespace: podNamespace, + Containers: []*podresourcesapi.ContainerResources{ + { + Name: containerName, + Devices: devs, + CpuIds: cpus, + Memory: memory, + DynamicResources: draDevs, }, }, }, @@ -158,25 +213,29 @@ func TestListPodResourcesV1(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesDynamicResources, true)() mockDevicesProvider := podresourcetest.NewMockDevicesProvider(mockCtrl) mockPodsProvider := podresourcetest.NewMockPodsProvider(mockCtrl) mockCPUsProvider := podresourcetest.NewMockCPUsProvider(mockCtrl) mockMemoryProvider := 
podresourcetest.NewMockMemoryProvider(mockCtrl) + mockDynamicResourcesProvider := podresourcetest.NewMockDynamicResourcesProvider(mockCtrl) mockPodsProvider.EXPECT().GetPods().Return(tc.pods).AnyTimes().AnyTimes() mockDevicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(tc.devices).AnyTimes() mockCPUsProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(tc.cpus).AnyTimes() mockMemoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(tc.memory).AnyTimes() + mockDynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &containers[0]).Return(tc.dynamicResources).AnyTimes() mockDevicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes() mockCPUsProvider.EXPECT().GetAllocatableCPUs().Return([]int64{}).AnyTimes() mockDevicesProvider.EXPECT().GetAllocatableDevices().Return([]*podresourcesapi.ContainerDevices{}).AnyTimes() mockMemoryProvider.EXPECT().GetAllocatableMemory().Return([]*podresourcesapi.ContainerMemory{}).AnyTimes() providers := PodResourcesProviders{ - Pods: mockPodsProvider, - Devices: mockDevicesProvider, - Cpus: mockCPUsProvider, - Memory: mockMemoryProvider, + Pods: mockPodsProvider, + Devices: mockDevicesProvider, + Cpus: mockCPUsProvider, + Memory: mockMemoryProvider, + DynamicResources: mockDynamicResourcesProvider, } server := NewV1PodResourcesServer(providers) resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{}) @@ -485,6 +544,185 @@ func TestAllocatableResources(t *testing.T) { } } +func TestGetPodResourcesV1(t *testing.T) { + podName := "pod-name" + podNamespace := "pod-namespace" + podUID := types.UID("pod-uid") + containerName := "container-name" + numaID := int64(1) + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + devs := []*podresourcesapi.ContainerDevices{ + { + ResourceName: "resource", + DeviceIds: []string{"dev0", "dev1"}, + Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}}, + }, + } + + 
cpus := []int64{12, 23, 30} + + memory := []*podresourcesapi.ContainerMemory{ + { + MemoryType: "memory", + Size_: 1073741824, + Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}}, + }, + { + MemoryType: "hugepages-1Gi", + Size_: 1073741824, + Topology: &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}}, + }, + } + + containers := []v1.Container{ + { + Name: containerName, + }, + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: podNamespace, + UID: podUID, + }, + Spec: v1.PodSpec{ + Containers: containers, + }, + } + + pluginCDIDevices := []*podresourcesapi.CDIDevice{{Name: "dra-dev0"}, {Name: "dra-dev1"}} + draDevs := []*podresourcesapi.DynamicResource{ + { + ClassName: "resource-class", + ClaimName: "claim-name", + ClaimNamespace: "default", + ClaimResources: []*podresourcesapi.ClaimResource{{CDIDevices: pluginCDIDevices}}, + }, + } + + for _, tc := range []struct { + desc string + err error + exist bool + pod *v1.Pod + devices []*podresourcesapi.ContainerDevices + cpus []int64 + memory []*podresourcesapi.ContainerMemory + dynamicResources []*podresourcesapi.DynamicResource + expectedResponse *podresourcesapi.GetPodResourcesResponse + }{ + { + desc: "pod not exist", + err: fmt.Errorf("pod %s in namespace %s not found", podName, podNamespace), + exist: false, + pod: nil, + devices: []*podresourcesapi.ContainerDevices{}, + cpus: []int64{}, + memory: []*podresourcesapi.ContainerMemory{}, + dynamicResources: []*podresourcesapi.DynamicResource{}, + + expectedResponse: &podresourcesapi.GetPodResourcesResponse{}, + }, + { + desc: "pod without devices", + err: nil, + exist: true, + pod: pod, + devices: []*podresourcesapi.ContainerDevices{}, + cpus: []int64{}, + memory: []*podresourcesapi.ContainerMemory{}, + dynamicResources: []*podresourcesapi.DynamicResource{}, + expectedResponse: &podresourcesapi.GetPodResourcesResponse{ + PodResources: 
&podresourcesapi.PodResources{ + Name: podName, + Namespace: podNamespace, + Containers: []*podresourcesapi.ContainerResources{ + { + Name: containerName, + Devices: []*podresourcesapi.ContainerDevices{}, + DynamicResources: []*podresourcesapi.DynamicResource{}, + }, + }, + }, + }, + }, + { + desc: "pod with devices", + err: nil, + exist: true, + pod: pod, + devices: devs, + cpus: cpus, + memory: memory, + dynamicResources: draDevs, + expectedResponse: &podresourcesapi.GetPodResourcesResponse{ + PodResources: &podresourcesapi.PodResources{ + Name: podName, + Namespace: podNamespace, + Containers: []*podresourcesapi.ContainerResources{ + { + Name: containerName, + Devices: devs, + CpuIds: cpus, + Memory: memory, + DynamicResources: draDevs, + }, + }, + }, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesGet, true)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.KubeletPodResourcesDynamicResources, true)() + mockDevicesProvider := podresourcetest.NewMockDevicesProvider(mockCtrl) + mockPodsProvider := podresourcetest.NewMockPodsProvider(mockCtrl) + mockCPUsProvider := podresourcetest.NewMockCPUsProvider(mockCtrl) + mockMemoryProvider := podresourcetest.NewMockMemoryProvider(mockCtrl) + mockDynamicResourcesProvider := podresourcetest.NewMockDynamicResourcesProvider(mockCtrl) + + mockPodsProvider.EXPECT().GetPodByName(podNamespace, podName).Return(tc.pod, tc.exist).AnyTimes() + mockDevicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(tc.devices).AnyTimes() + mockCPUsProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(tc.cpus).AnyTimes() + mockMemoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(tc.memory).AnyTimes() + mockDynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &containers[0]).Return(tc.dynamicResources).AnyTimes() + 
mockDevicesProvider.EXPECT().UpdateAllocatedDevices().Return().AnyTimes()
+			mockCPUsProvider.EXPECT().GetAllocatableCPUs().Return([]int64{}).AnyTimes()
+			mockDevicesProvider.EXPECT().GetAllocatableDevices().Return([]*podresourcesapi.ContainerDevices{}).AnyTimes()
+			mockMemoryProvider.EXPECT().GetAllocatableMemory().Return([]*podresourcesapi.ContainerMemory{}).AnyTimes()
+
+			providers := PodResourcesProviders{
+				Pods:             mockPodsProvider,
+				Devices:          mockDevicesProvider,
+				Cpus:             mockCPUsProvider,
+				Memory:           mockMemoryProvider,
+				DynamicResources: mockDynamicResourcesProvider,
+			}
+			server := NewV1PodResourcesServer(providers)
+			podReq := &podresourcesapi.GetPodResourcesRequest{PodName: podName, PodNamespace: podNamespace}
+			resp, err := server.Get(context.TODO(), podReq)
+			if err != nil {
+				if tc.err == nil || err.Error() != tc.err.Error() {
+					t.Errorf("want err = %v, got %v", tc.err, err)
+				}
+			} else {
+				if tc.err != nil {
+					t.Errorf("want err = %v, got %v", tc.err, err)
+				} else {
+					if !equalGetResponse(tc.expectedResponse, resp) {
+						t.Errorf("want resp = %s, got %s", tc.expectedResponse.String(), resp.String())
+					}
+				}
+			}
+		})
+	}
+
+}
+
 func equalListResponse(respA, respB *podresourcesapi.ListPodResourcesResponse) bool {
 	if len(respA.PodResources) != len(respB.PodResources) {
 		return false
@@ -515,11 +753,54 @@ func equalListResponse(respA, respB *podresourcesapi.ListPodResourcesResponse) b
 			if !equalContainerDevices(cntA.Devices, cntB.Devices) {
 				return false
 			}
+
+			if !equalDynamicResources(cntA.DynamicResources, cntB.DynamicResources) {
+				return false
+			}
 		}
 	}
 	return true
 }
+func equalDynamicResources(draResA, draResB []*podresourcesapi.DynamicResource) bool {
+	if len(draResA) != len(draResB) {
+		return false
+	}
+
+	for idx := 0; idx < len(draResA); idx++ {
+		cntDraResA := draResA[idx]
+		cntDraResB := draResB[idx]
+
+		if cntDraResA.ClassName != cntDraResB.ClassName {
+			return false
+		}
+		if cntDraResA.ClaimName != cntDraResB.ClaimName {
+			return false
+		}
+		if cntDraResA.ClaimNamespace != cntDraResB.ClaimNamespace {
+			return false
+		}
+		if len(cntDraResA.ClaimResources) != len(cntDraResB.ClaimResources) {
+			return false
+		}
+		for i := 0; i < len(cntDraResA.ClaimResources); i++ {
+			claimResA := cntDraResA.ClaimResources[i]
+			claimResB := cntDraResB.ClaimResources[i]
+			if len(claimResA.CDIDevices) != len(claimResB.CDIDevices) {
+				return false
+			}
+			for y := 0; y < len(claimResA.CDIDevices); y++ {
+				cdiDeviceA := claimResA.CDIDevices[y]
+				cdiDeviceB := claimResB.CDIDevices[y]
+				if cdiDeviceA.Name != cdiDeviceB.Name {
+					return false
+				}
+			}
+		}
+	}
+
+	return true
+}
 func equalContainerDevices(devA, devB []*podresourcesapi.ContainerDevices) bool {
 	if len(devA) != len(devB) {
 		return false
@@ -581,3 +862,38 @@
 	}
 	return equalContainerDevices(respA.Devices, respB.Devices)
 }
+
+func equalGetResponse(respA, respB *podresourcesapi.GetPodResourcesResponse) bool {
+	podResA := respA.PodResources
+	podResB := respB.PodResources
+	if podResA.Name != podResB.Name {
+		return false
+	}
+	if podResA.Namespace != podResB.Namespace {
+		return false
+	}
+	if len(podResA.Containers) != len(podResB.Containers) {
+		return false
+	}
+	for jdx := 0; jdx < len(podResA.Containers); jdx++ {
+		cntA := podResA.Containers[jdx]
+		cntB := podResB.Containers[jdx]
+
+		if cntA.Name != cntB.Name {
+			return false
+		}
+		if !equalInt64s(cntA.CpuIds, cntB.CpuIds) {
+			return false
+		}
+
+		if !equalContainerDevices(cntA.Devices, cntB.Devices) {
+			return false
+		}
+
+		if !equalDynamicResources(cntA.DynamicResources, cntB.DynamicResources) {
+			return false
+		}
+
+	}
+	return true
+}