Merge pull request #93243 from AlexeyPerevalov/NUMAidAndCPUidsInPodResources

Implement TopologyInfo and cpu_ids in podresources interface

Commit: f5abe26a19
@@ -22,34 +22,25 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 
 	"k8s.io/kubelet/pkg/apis/podresources/v1"
-	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 )
 
 // podResourcesServerV1alpha1 implements PodResourcesListerServer
 type v1PodResourcesServer struct {
 	podsProvider    PodsProvider
 	devicesProvider DevicesProvider
+	cpusProvider    CPUsProvider
 }
 
 // NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
 // with device information provided by the DevicesProvider
-func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider) v1.PodResourcesListerServer {
+func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider) v1.PodResourcesListerServer {
 	return &v1PodResourcesServer{
 		podsProvider:    podsProvider,
 		devicesProvider: devicesProvider,
+		cpusProvider:    cpusProvider,
 	}
 }
 
-func alphaDevicesToV1(alphaDevs []*v1alpha1.ContainerDevices) []*v1.ContainerDevices {
-	var devs []*v1.ContainerDevices
-	for _, alphaDev := range alphaDevs {
-		dev := v1.ContainerDevices(*alphaDev)
-		devs = append(devs, &dev)
-	}
-
-	return devs
-}
-
 // List returns information about the resources assigned to pods on the node
 func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResourcesRequest) (*v1.ListPodResourcesResponse, error) {
 	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()

@@ -68,7 +59,8 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResourcesRequest) (*v1.ListPodResourcesResponse, error) {
 		for j, container := range pod.Spec.Containers {
 			pRes.Containers[j] = &v1.ContainerResources{
 				Name:    container.Name,
-				Devices: alphaDevicesToV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
+				Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
+				CpuIds:  p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
 			}
 		}
 		podResources[i] = &pRes
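With this change a single List call on the v1 endpoint returns both device assignments (now carrying TopologyInfo) and the container's CPU IDs. A minimal client sketch, assuming the conventional pod-resources socket path (/var/lib/kubelet/pod-resources/kubelet.sock; the actual location depends on kubelet configuration):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
	// Assumed socket path; verify against your kubelet deployment.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/pod-resources/kubelet.sock", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewPodResourcesListerClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := client.List(ctx, &v1.ListPodResourcesRequest{})
	if err != nil {
		panic(err)
	}
	for _, pod := range resp.GetPodResources() {
		for _, c := range pod.GetContainers() {
			// CpuIds is the field added by this PR; Devices now carries TopologyInfo.
			fmt.Printf("%s/%s cpus=%v devices=%v\n", pod.GetName(), c.GetName(), c.GetCpuIds(), c.GetDevices())
		}
	}
}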
@@ -24,7 +24,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
-	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 )
 
 func TestListPodResourcesV1(t *testing.T) {

@@ -32,24 +31,30 @@ func TestListPodResourcesV1(t *testing.T) {
 	podNamespace := "pod-namespace"
 	podUID := types.UID("pod-uid")
 	containerName := "container-name"
+	numaID := int64(1)
 
-	devs := []*v1alpha1.ContainerDevices{
+	devs := []*podresourcesapi.ContainerDevices{
 		{
 			ResourceName: "resource",
 			DeviceIds:    []string{"dev0", "dev1"},
+			Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
 		},
 	}
 
+	cpus := []int64{12, 23, 30}
+
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*v1alpha1.ContainerDevices
+		devices          []*podresourcesapi.ContainerDevices
+		cpus             []int64
 		expectedResponse *podresourcesapi.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*v1alpha1.ContainerDevices{},
+			devices:          []*podresourcesapi.ContainerDevices{},
+			cpus:             []int64{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{},
 		},
 		{

@@ -70,7 +75,8 @@ func TestListPodResourcesV1(t *testing.T) {
 					},
 				},
 			},
-			devices: []*v1alpha1.ContainerDevices{},
+			devices: []*podresourcesapi.ContainerDevices{},
+			cpus:    []int64{},
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
 					{

@@ -105,6 +111,7 @@ func TestListPodResourcesV1(t *testing.T) {
 				},
 			},
 			devices: devs,
+			cpus:    cpus,
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
 					{

@@ -113,7 +120,8 @@ func TestListPodResourcesV1(t *testing.T) {
 						Containers: []*podresourcesapi.ContainerResources{
 							{
 								Name:    containerName,
-								Devices: alphaDevicesToV1(devs),
+								Devices: devs,
+								CpuIds:  cpus,
 							},
 						},
 					},

@@ -125,8 +133,9 @@ func TestListPodResourcesV1(t *testing.T) {
 			m := new(mockProvider)
 			m.On("GetPods").Return(tc.pods)
 			m.On("GetDevices", string(podUID), containerName).Return(tc.devices)
+			m.On("GetCPUs", string(podUID), containerName).Return(tc.cpus)
 			m.On("UpdateAllocatedDevices").Return()
-			server := NewV1PodResourcesServer(m, m)
+			server := NewV1PodResourcesServer(m, m, m)
 			resp, err := server.List(context.TODO(), &podresourcesapi.ListPodResourcesRequest{})
 			if err != nil {
 				t.Errorf("want err = %v, got %q", nil, err)
@@ -21,6 +21,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 
+	"k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 )
 
@@ -39,6 +40,19 @@ func NewV1alpha1PodResourcesServer(podsProvider PodsProvider, devicesProvider De
 	}
 }
 
+func v1DevicesToAlphaV1(alphaDevs []*v1.ContainerDevices) []*v1alpha1.ContainerDevices {
+	var devs []*v1alpha1.ContainerDevices
+	for _, alphaDev := range alphaDevs {
+		dev := v1alpha1.ContainerDevices{
+			ResourceName: alphaDev.ResourceName,
+			DeviceIds:    alphaDev.DeviceIds,
+		}
+		devs = append(devs, &dev)
+	}
+
+	return devs
+}
+
 // List returns information about the resources assigned to pods on the node
 func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) {
 	metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1alpha1").Inc()

@@ -56,7 +70,7 @@ func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) {
 		for j, container := range pod.Spec.Containers {
 			pRes.Containers[j] = &v1alpha1.ContainerResources{
 				Name:    container.Name,
-				Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
+				Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
 			}
 		}
 		podResources[i] = &pRes
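The conversion direction flips here: providers now return v1 types, and only the legacy v1alpha1 server down-converts. The old helper could use a direct struct conversion, v1.ContainerDevices(*alphaDev), because the two types had identical fields; since v1.ContainerDevices gained Topology, Go no longer allows that conversion, so v1DevicesToAlphaV1 copies the shared fields and drops the topology data the old API cannot express. A sketch of the same pattern with hypothetical stand-in types:

package main

import "fmt"

// Hypothetical stand-ins for the v1alpha1 and v1 device types.
type DevOld struct {
	ResourceName string
	DeviceIds    []string
}

type DevNew struct {
	ResourceName string
	DeviceIds    []string
	Topology     *string // extra field: DevOld(DevNew{...}) no longer compiles
}

// Down-convert by copying the shared fields, dropping what the old type cannot express.
func toOld(n DevNew) DevOld {
	return DevOld{ResourceName: n.ResourceName, DeviceIds: n.DeviceIds}
}

func main() {
	numa := "numa-0"
	n := DevNew{ResourceName: "resource", DeviceIds: []string{"dev0"}, Topology: &numa}
	fmt.Printf("%+v\n", toOld(n)) // {ResourceName:resource DeviceIds:[dev0]}
}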
@@ -25,6 +25,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 )
 
@@ -37,9 +38,14 @@ func (m *mockProvider) GetPods() []*v1.Pod {
 	return args.Get(0).([]*v1.Pod)
 }
 
-func (m *mockProvider) GetDevices(podUID, containerName string) []*v1alpha1.ContainerDevices {
+func (m *mockProvider) GetDevices(podUID, containerName string) []*podresourcesv1.ContainerDevices {
 	args := m.Called(podUID, containerName)
-	return args.Get(0).([]*v1alpha1.ContainerDevices)
+	return args.Get(0).([]*podresourcesv1.ContainerDevices)
+}
+
+func (m *mockProvider) GetCPUs(podUID, containerName string) []int64 {
+	args := m.Called(podUID, containerName)
+	return args.Get(0).([]int64)
 }
 
 func (m *mockProvider) UpdateAllocatedDevices() {

@@ -52,7 +58,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 	podUID := types.UID("pod-uid")
 	containerName := "container-name"
 
-	devs := []*v1alpha1.ContainerDevices{
+	devs := []*podresourcesv1.ContainerDevices{
 		{
 			ResourceName: "resource",
 			DeviceIds:    []string{"dev0", "dev1"},

@@ -62,13 +68,13 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 	for _, tc := range []struct {
 		desc             string
 		pods             []*v1.Pod
-		devices          []*v1alpha1.ContainerDevices
+		devices          []*podresourcesv1.ContainerDevices
 		expectedResponse *v1alpha1.ListPodResourcesResponse
 	}{
 		{
 			desc:             "no pods",
 			pods:             []*v1.Pod{},
-			devices:          []*v1alpha1.ContainerDevices{},
+			devices:          []*podresourcesv1.ContainerDevices{},
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{},
 		},
 		{

@@ -89,7 +95,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 					},
 				},
 			},
-			devices: []*v1alpha1.ContainerDevices{},
+			devices: []*podresourcesv1.ContainerDevices{},
 			expectedResponse: &v1alpha1.ListPodResourcesResponse{
 				PodResources: []*v1alpha1.PodResources{
 					{

@@ -132,7 +138,7 @@ func TestListPodResourcesV1alpha1(t *testing.T) {
 						Containers: []*v1alpha1.ContainerResources{
 							{
 								Name:    containerName,
-								Devices: devs,
+								Devices: v1DevicesToAlphaV1(devs),
 							},
 						},
 					},
@@ -18,12 +18,12 @@ package podresources
 
 import (
 	"k8s.io/api/core/v1"
-	"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 )
 
 // DevicesProvider knows how to provide the devices used by the given container
 type DevicesProvider interface {
-	GetDevices(podUID, containerName string) []*v1alpha1.ContainerDevices
+	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
 	UpdateAllocatedDevices()
 }
 
@@ -31,3 +31,8 @@ type DevicesProvider interface {
 type PodsProvider interface {
 	GetPods() []*v1.Pod
 }
+
+// CPUsProvider knows how to provide the cpus used by the given container
+type CPUsProvider interface {
+	GetCPUs(podUID, containerName string) []int64
+}
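Any component that can map a podUID and container name to allocated CPU IDs satisfies the new CPUsProvider interface; in this commit the CPU manager is the concrete provider, wired in through the container manager. A minimal sketch of an alternative implementation backed by a hypothetical static table:

package main

import "fmt"

// CPUsProvider mirrors the interface added to the podresources types.
type CPUsProvider interface {
	GetCPUs(podUID, containerName string) []int64
}

// staticCPUsProvider is a hypothetical provider backed by a fixed table,
// keyed as "<podUID>/<containerName>".
type staticCPUsProvider struct {
	assignments map[string][]int64
}

func (s *staticCPUsProvider) GetCPUs(podUID, containerName string) []int64 {
	return s.assignments[podUID+"/"+containerName]
}

func main() {
	var p CPUsProvider = &staticCPUsProvider{
		assignments: map[string][]int64{"pod-uid/container-name": {12, 23, 30}},
	}
	fmt.Println(p.GetCPUs("pod-uid", "container-name")) // [12 23 30]
}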
@@ -42,7 +42,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/cri-api/pkg/apis:go_default_library",
-        "//staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/kubelet/pkg/apis/podresources/v1:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:aix": [
@@ -23,7 +23,7 @@ import (
 	// TODO: Migrate kubelet to either use its own internal objects or client library.
 	v1 "k8s.io/api/core/v1"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -106,6 +106,9 @@ type ContainerManager interface {
 	// GetDevices returns information about the devices assigned to pods and containers
 	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
 
+	// GetCPUs returns information about the cpus assigned to pods and containers
+	GetCPUs(podUID, containerName string) []int64
+
 	// ShouldResetExtendedResourceCapacity returns whether or not the extended resources should be zeroed,
 	// due to node recreation.
 	ShouldResetExtendedResourceCapacity() bool
@@ -47,7 +47,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"

@@ -1027,6 +1027,10 @@ func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podr
 	return cm.deviceManager.GetDevices(podUID, containerName)
 }
 
+func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
+	return cm.cpuManager.GetCPUs(podUID, containerName)
+}
+
 func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 	return cm.deviceManager.ShouldResetExtendedResourceCapacity()
 }
@@ -22,7 +22,7 @@ import (
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"

@@ -125,6 +125,10 @@ func (cm *containerManagerStub) UpdateAllocatedDevices() {
 	return
 }
 
+func (cm *containerManagerStub) GetCPUs(_, _ string) []int64 {
+	return nil
+}
+
 func NewStubContainerManager() ContainerManager {
 	return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
 }
@@ -32,7 +32,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"

@@ -192,3 +192,7 @@ func (cm *containerManagerImpl) GetAllocateResourcesPodAdmitHandler() lifecycle.
 func (cm *containerManagerImpl) UpdateAllocatedDevices() {
 	return
 }
+
+func (cm *containerManagerImpl) GetCPUs(_, _ string) []int64 {
+	return nil
+}
@@ -77,6 +77,10 @@ type Manager interface {
 	// and is consulted to achieve NUMA aware resource alignment among this
 	// and other resource controllers.
 	GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
+
+	// GetCPUs implements the podresources.CPUsProvider interface to provide allocated
+	// cpus for the container
+	GetCPUs(podUID, containerName string) []int64
 }
 
 type manager struct {

@@ -461,3 +465,12 @@ func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet)
 		CpusetCpus: cpus.String(),
 	})
 }
+
+func (m *manager) GetCPUs(podUID, containerName string) []int64 {
+	cpus := m.state.GetCPUSetOrDefault(string(podUID), containerName)
+	result := []int64{}
+	for _, cpu := range cpus.ToSliceNoSort() {
+		result = append(result, int64(cpu))
+	}
+	return result
+}
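The CPU manager keeps each container's assignment as a cpuset, essentially a set of integer CPU IDs; GetCPUs flattens that set into the []int64 wire type, using ToSliceNoSort since the API makes no ordering promise. A standalone sketch of the same flattening, with a plain map standing in for cpuset.CPUSet:

package main

import "fmt"

// cpuSet is a hypothetical stand-in for the kubelet's cpuset.CPUSet,
// which models an allocation as a set of integer CPU IDs.
type cpuSet map[int]struct{}

// toSliceNoSort mirrors CPUSet.ToSliceNoSort: dump members without sorting.
func (s cpuSet) toSliceNoSort() []int {
	out := make([]int, 0, len(s))
	for cpu := range s {
		out = append(out, cpu)
	}
	return out
}

// toCPUIDs performs the same widening GetCPUs does: set -> []int64 for the gRPC response.
func toCPUIDs(s cpuSet) []int64 {
	result := []int64{}
	for _, cpu := range s.toSliceNoSort() {
		result = append(result, int64(cpu))
	}
	return result
}

func main() {
	assigned := cpuSet{12: {}, 23: {}, 30: {}}
	fmt.Println(toCPUIDs(assigned)) // e.g. [30 12 23] (order is unspecified)
}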
@@ -64,6 +64,11 @@ func (m *fakeManager) State() state.Reader {
 	return m.state
 }
 
+func (m *fakeManager) GetCPUs(podUID, containerName string) []int64 {
+	klog.Infof("[fake cpumanager] GetCPUs(podUID: %s, containerName: %s)", podUID, containerName)
+	return nil
+}
+
 // NewFakeManager creates empty/fake cpu manager
 func NewFakeManager() Manager {
 	return &fakeManager{
|
@ -36,7 +36,7 @@ go_library(
|
|||||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||||
"//staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1:go_default_library",
|
"//staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1:go_default_library",
|
||||||
"//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library",
|
"//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library",
|
||||||
"//staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1:go_default_library",
|
"//staging/src/k8s.io/kubelet/pkg/apis/podresources/v1:go_default_library",
|
||||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||||
"//vendor/google.golang.org/grpc:go_default_library",
|
"//vendor/google.golang.org/grpc:go_default_library",
|
||||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||||
@ -48,11 +48,13 @@ go_test(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"endpoint_test.go",
|
"endpoint_test.go",
|
||||||
"manager_test.go",
|
"manager_test.go",
|
||||||
|
"pod_devices_test.go",
|
||||||
"topology_hints_test.go",
|
"topology_hints_test.go",
|
||||||
],
|
],
|
||||||
embed = [":go_default_library"],
|
embed = [":go_default_library"],
|
||||||
deps = [
|
deps = [
|
||||||
"//pkg/kubelet/checkpointmanager:go_default_library",
|
"//pkg/kubelet/checkpointmanager:go_default_library",
|
||||||
|
"//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library",
|
||||||
"//pkg/kubelet/cm/topologymanager:go_default_library",
|
"//pkg/kubelet/cm/topologymanager:go_default_library",
|
||||||
"//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
|
"//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
|
||||||
"//pkg/kubelet/config:go_default_library",
|
"//pkg/kubelet/config:go_default_library",
|
||||||
|
@@ -8,6 +8,7 @@ go_library(
     deps = [
         "//pkg/kubelet/checkpointmanager:go_default_library",
         "//pkg/kubelet/checkpointmanager/checksum:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
     ],
 )
@@ -19,6 +19,7 @@ package checkpoint
 import (
 	"encoding/json"
 
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
 )

@@ -29,12 +30,15 @@ type DeviceManagerCheckpoint interface {
 	GetData() ([]PodDevicesEntry, map[string][]string)
 }
 
+// DevicesPerNUMA represents device ids obtained from device plugin per NUMA node id
+type DevicesPerNUMA map[int64][]string
+
 // PodDevicesEntry connects pod information to devices
 type PodDevicesEntry struct {
 	PodUID        string
 	ContainerName string
 	ResourceName  string
-	DeviceIDs     []string
+	DeviceIDs     DevicesPerNUMA
 	AllocResp     []byte
 }
 
@@ -52,6 +56,22 @@ type Data struct {
 	Checksum checksum.Checksum
 }
 
+// NewDevicesPerNUMA is a function that creates DevicesPerNUMA map
+func NewDevicesPerNUMA() DevicesPerNUMA {
+	return make(DevicesPerNUMA)
+}
+
+// Devices is a function that returns all device ids for all NUMA nodes
+// and represent it as sets.String
+func (dev DevicesPerNUMA) Devices() sets.String {
+	result := sets.NewString()
+
+	for _, devs := range dev {
+		result.Insert(devs...)
+	}
+	return result
+}
+
 // New returns an instance of Checkpoint
 func New(devEntries []PodDevicesEntry,
 	devices map[string][]string) DeviceManagerCheckpoint {
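DevicesPerNUMA is the pivot of the checkpoint change: entries now record which NUMA node each allocated device sits on instead of a flat []string, and Devices() flattens the map back into a set for callers that only need membership. A small usage sketch with hypothetical device names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// DevicesPerNUMA mirrors the type added to the devicemanager checkpoint package:
// NUMA node id -> device ids living on that node.
type DevicesPerNUMA map[int64][]string

// Devices flattens the per-NUMA map into one set, as in the commit.
func (dev DevicesPerNUMA) Devices() sets.String {
	result := sets.NewString()
	for _, devs := range dev {
		result.Insert(devs...)
	}
	return result
}

func main() {
	alloc := DevicesPerNUMA{
		0: {"dev1", "dev2"}, // hypothetical devices on NUMA node 0
		1: {"dev3"},         // and one on NUMA node 1
	}
	fmt.Println(alloc.Devices().List()) // [dev1 dev2 dev3]
}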
@@ -36,7 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"

@@ -96,7 +96,7 @@ type ManagerImpl struct {
 	allocatedDevices map[string]sets.String
 
 	// podDevices contains pod to allocated device mapping.
-	podDevices        podDevices
+	podDevices        *podDevices
 	checkpointManager checkpointmanager.CheckpointManager
 
 	// List of NUMA Nodes available on the underlying machine

@@ -150,7 +150,7 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi
 		healthyDevices:        make(map[string]sets.String),
 		unhealthyDevices:      make(map[string]sets.String),
 		allocatedDevices:      make(map[string]sets.String),
-		podDevices:            make(podDevices),
+		podDevices:            newPodDevices(),
 		numaNodes:             numaNodes,
 		topologyAffinityStore: topologyAffinityStore,
 		devicesToReuse:        make(PodReusableDevices),

@@ -393,11 +393,8 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 	pod := attrs.Pod
 
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
-
 	// quick return if no pluginResources requested
-	if _, podRequireDevicePluginResource := m.podDevices[string(pod.UID)]; !podRequireDevicePluginResource {
+	if !m.podDevices.hasPod(string(pod.UID)) {
 		return nil
 	}
 

@@ -903,10 +900,17 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 			return fmt.Errorf("no containers return in allocation response %v", resp)
 		}
 
+		allocDevicesWithNUMA := checkpoint.NewDevicesPerNUMA()
 		// Update internal cached podDevices state.
 		m.mutex.Lock()
-		m.podDevices.insert(podUID, contName, resource, allocDevices, resp.ContainerResponses[0])
+		for dev := range allocDevices {
+			for idx := range m.allDevices[resource][dev].Topology.Nodes {
+				node := m.allDevices[resource][dev].Topology.Nodes[idx]
+				allocDevicesWithNUMA[node.ID] = append(allocDevicesWithNUMA[node.ID], dev)
+			}
+		}
 		m.mutex.Unlock()
+		m.podDevices.insert(podUID, contName, resource, allocDevicesWithNUMA, resp.ContainerResponses[0])
 	}
 
 	if needsUpdateCheckpoint {

@@ -945,8 +949,6 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
 			return nil, err
 		}
 	}
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
 	return m.podDevices.deviceRunContainerOptions(string(pod.UID), container.Name), nil
 }
 

@@ -1019,6 +1021,9 @@ func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo)
 	if allocatableResource.ScalarResources == nil {
 		allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
 	}
+
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
 	for resource, devices := range m.allocatedDevices {
 		needed := devices.Len()
 		quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)]

@@ -1038,6 +1043,8 @@ func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo)
 }
 
 func (m *ManagerImpl) isDevicePluginResource(resource string) bool {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
 	_, registeredResource := m.healthyDevices[resource]
 	_, allocatedResource := m.allocatedDevices[resource]
 	// Return true if this is either an active device plugin resource or

@@ -1050,8 +1057,6 @@ func (m *ManagerImpl) isDevicePluginResource(resource string) bool {
 
 // GetDevices returns the devices used by the specified container
 func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
 	return m.podDevices.getContainerDevices(podUID, containerName)
 }
 
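At allocation time the manager now inverts the plugin-reported topology: allocDevices is a flat set of device IDs, m.allDevices records the NUMA nodes each device belongs to, and the new loop buckets the allocated IDs per node before caching them in podDevices. A standalone sketch of that inversion with hypothetical inputs:

package main

import "fmt"

// topology is a hypothetical stand-in for the device plugin's TopologyInfo:
// device id -> NUMA node ids the device is attached to.
type topology map[string][]int64

// groupByNUMA mirrors the loop added to allocateContainerResources:
// bucket a flat allocation by the NUMA node(s) each device reports.
func groupByNUMA(allocated []string, topo topology) map[int64][]string {
	perNUMA := make(map[int64][]string)
	for _, dev := range allocated {
		for _, node := range topo[dev] {
			perNUMA[node] = append(perNUMA[node], dev)
		}
	}
	return perNUMA
}

func main() {
	topo := topology{"dev1": {0}, "dev2": {0}, "dev3": {1}}
	fmt.Println(groupByNUMA([]string{"dev1", "dev3"}, topo)) // map[0:[dev1] 1:[dev3]]
}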
@@ -18,7 +18,7 @@ package devicemanager
 
 import (
 	v1 "k8s.io/api/core/v1"
-	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -38,6 +38,7 @@ import (
 	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
 	watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"

@@ -432,10 +433,10 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
 	as.True(testManager.isDevicePluginResource(resourceName2))
 }
 
-func constructDevices(devices []string) sets.String {
-	ret := sets.NewString()
+func constructDevices(devices []string) checkpoint.DevicesPerNUMA {
+	ret := checkpoint.DevicesPerNUMA{}
 	for _, dev := range devices {
-		ret.Insert(dev)
+		ret[0] = append(ret[0], dev)
 	}
 	return ret
 }

@@ -477,7 +478,7 @@ func TestCheckpoint(t *testing.T) {
 		healthyDevices:    make(map[string]sets.String),
 		unhealthyDevices:  make(map[string]sets.String),
 		allocatedDevices:  make(map[string]sets.String),
-		podDevices:        make(podDevices),
+		podDevices:        newPodDevices(),
 		checkpointManager: ckm,
 	}
 

@@ -516,12 +517,12 @@ func TestCheckpoint(t *testing.T) {
 	err = testManager.writeCheckpoint()
 
 	as.Nil(err)
-	testManager.podDevices = make(podDevices)
+	testManager.podDevices = newPodDevices()
 	err = testManager.readCheckpoint()
 	as.Nil(err)
 
-	as.Equal(len(expectedPodDevices), len(testManager.podDevices))
-	for podUID, containerDevices := range expectedPodDevices {
+	as.Equal(expectedPodDevices.size(), testManager.podDevices.size())
+	for podUID, containerDevices := range expectedPodDevices.devs {
 		for conName, resources := range containerDevices {
 			for resource := range resources {
 				expDevices := expectedPodDevices.containerDevices(podUID, conName, resource)

@@ -615,19 +616,17 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 		unhealthyDevices:      make(map[string]sets.String),
 		allocatedDevices:      make(map[string]sets.String),
 		endpoints:             make(map[string]endpointInfo),
-		podDevices:            make(podDevices),
+		podDevices:            newPodDevices(),
 		devicesToReuse:        make(PodReusableDevices),
 		topologyAffinityStore: topologymanager.NewFakeManager(),
 		activePods:            activePods,
 		sourcesReady:          &sourcesReadyStub{},
 		checkpointManager:     ckm,
+		allDevices:            make(map[string]map[string]pluginapi.Device),
 	}
 
 	for _, res := range testRes {
-		testManager.healthyDevices[res.resourceName] = sets.NewString()
-		for _, dev := range res.devs {
-			testManager.healthyDevices[res.resourceName].Insert(dev)
-		}
+		testManager.healthyDevices[res.resourceName] = sets.NewString(res.devs.Devices().UnsortedList()...)
 		if res.resourceName == "domain1.com/resource1" {
 			testManager.endpoints[res.resourceName] = endpointInfo{
 				e: &MockEndpoint{allocateFunc: allocateStubFunc()},

@@ -657,6 +656,8 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 				opts: nil,
 			}
 		}
+		testManager.allDevices[res.resourceName] = makeDevice(res.devs)
+
 	}
 	return testManager, nil
 }

@@ -664,19 +665,19 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 type TestResource struct {
 	resourceName     string
 	resourceQuantity resource.Quantity
-	devs             []string
+	devs             checkpoint.DevicesPerNUMA
 }
 
 func TestPodContainerDeviceAllocation(t *testing.T) {
 	res1 := TestResource{
 		resourceName:     "domain1.com/resource1",
 		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
-		devs:             []string{"dev1", "dev2"},
+		devs:             checkpoint.DevicesPerNUMA{0: []string{"dev1", "dev2"}},
 	}
 	res2 := TestResource{
 		resourceName:     "domain2.com/resource2",
 		resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
-		devs:             []string{"dev3", "dev4"},
+		devs:             checkpoint.DevicesPerNUMA{0: []string{"dev3", "dev4"}},
 	}
 	testResources := make([]TestResource, 2)
 	testResources = append(testResources, res1)

@@ -767,12 +768,12 @@ func TestInitContainerDeviceAllocation(t *testing.T) {
 	res1 := TestResource{
 		resourceName:     "domain1.com/resource1",
 		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
-		devs:             []string{"dev1", "dev2"},
+		devs:             checkpoint.DevicesPerNUMA{0: []string{"dev1", "dev2"}},
 	}
 	res2 := TestResource{
 		resourceName:     "domain2.com/resource2",
 		resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
-		devs:             []string{"dev3", "dev4"},
+		devs:             checkpoint.DevicesPerNUMA{0: []string{"dev3", "dev4"}},
 	}
 	testResources := make([]TestResource, 2)
 	testResources = append(testResources, res1)

@@ -882,10 +883,10 @@ func TestUpdatePluginResources(t *testing.T) {
 		callback:          monitorCallback,
 		allocatedDevices:  make(map[string]sets.String),
 		healthyDevices:    make(map[string]sets.String),
-		podDevices:        make(podDevices),
+		podDevices:        newPodDevices(),
 		checkpointManager: ckm,
 	}
-	testManager.podDevices[string(pod.UID)] = make(containerDevices)
+	testManager.podDevices.devs[string(pod.UID)] = make(containerDevices)
 
 	// require one of resource1 and one of resource2
 	testManager.allocatedDevices[resourceName1] = sets.NewString()

@@ -920,7 +921,7 @@ func TestDevicePreStartContainer(t *testing.T) {
 	res1 := TestResource{
 		resourceName:     "domain1.com/resource1",
 		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
-		devs:             []string{"dev1", "dev2"},
+		devs:             checkpoint.DevicesPerNUMA{0: []string{"dev1", "dev2"}},
 	}
 	as := require.New(t)
 	podsStub := activePodsStub{

@@ -960,7 +961,7 @@ func TestDevicePreStartContainer(t *testing.T) {
 
 	as.Contains(initializedDevs, "dev1")
 	as.Contains(initializedDevs, "dev2")
-	as.Equal(len(initializedDevs), len(res1.devs))
+	as.Equal(len(initializedDevs), res1.devs.Devices().Len())
 
 	expectedResps, err := allocateStubFunc()([]string{"dev1", "dev2"})
 	as.Nil(err)

@@ -983,7 +984,7 @@ func TestResetExtendedResource(t *testing.T) {
 		healthyDevices:    make(map[string]sets.String),
 		unhealthyDevices:  make(map[string]sets.String),
 		allocatedDevices:  make(map[string]sets.String),
-		podDevices:        make(podDevices),
+		podDevices:        newPodDevices(),
 		checkpointManager: ckm,
 	}
 

@@ -1057,3 +1058,13 @@ func allocateStubFunc() func(devs []string) (*pluginapi.AllocateResponse, error)
 		return resps, nil
 	}
 }
+
+func makeDevice(devOnNUMA checkpoint.DevicesPerNUMA) map[string]pluginapi.Device {
+	res := make(map[string]pluginapi.Device)
+	for node, devs := range devOnNUMA {
+		for idx := range devs {
+			res[devs[idx]] = pluginapi.Device{ID: devs[idx], Topology: &pluginapi.TopologyInfo{Nodes: []*pluginapi.NUMANode{{ID: node}}}}
+		}
+	}
+	return res
+}
@ -17,72 +17,105 @@ limitations under the License.
|
|||||||
package devicemanager
|
package devicemanager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
|
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
|
||||||
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
|
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
|
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
|
||||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||||
)
|
)
|
||||||
|
|
||||||
type deviceAllocateInfo struct {
|
type deviceAllocateInfo struct {
|
||||||
// deviceIds contains device Ids allocated to this container for the given resourceName.
|
// deviceIds contains device Ids allocated to this container for the given resourceName.
|
||||||
deviceIds sets.String
|
deviceIds checkpoint.DevicesPerNUMA
|
||||||
// allocResp contains cached rpc AllocateResponse.
|
// allocResp contains cached rpc AllocateResponse.
|
||||||
allocResp *pluginapi.ContainerAllocateResponse
|
allocResp *pluginapi.ContainerAllocateResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
type resourceAllocateInfo map[string]deviceAllocateInfo // Keyed by resourceName.
|
type resourceAllocateInfo map[string]deviceAllocateInfo // Keyed by resourceName.
|
||||||
type containerDevices map[string]resourceAllocateInfo // Keyed by containerName.
|
type containerDevices map[string]resourceAllocateInfo // Keyed by containerName.
|
||||||
type podDevices map[string]containerDevices // Keyed by podUID.
|
type podDevices struct {
|
||||||
|
sync.RWMutex
|
||||||
|
devs map[string]containerDevices // Keyed by podUID.
|
||||||
|
}
|
||||||
|
|
||||||
func (pdev podDevices) pods() sets.String {
|
// NewPodDevices is a function that returns object of podDevices type with its own guard
|
||||||
|
// RWMutex and a map where key is a pod UID and value contains
|
||||||
|
// container devices information of type containerDevices.
|
||||||
|
func newPodDevices() *podDevices {
|
||||||
|
return &podDevices{devs: make(map[string]containerDevices)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pdev *podDevices) pods() sets.String {
|
||||||
|
pdev.RLock()
|
||||||
|
defer pdev.RUnlock()
|
||||||
ret := sets.NewString()
|
ret := sets.NewString()
|
||||||
for k := range pdev {
|
for k := range pdev.devs {
|
||||||
ret.Insert(k)
|
ret.Insert(k)
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pdev podDevices) insert(podUID, contName, resource string, devices sets.String, resp *pluginapi.ContainerAllocateResponse) {
|
func (pdev *podDevices) size() int {
|
||||||
if _, podExists := pdev[podUID]; !podExists {
|
pdev.RLock()
|
||||||
pdev[podUID] = make(containerDevices)
|
defer pdev.RUnlock()
|
||||||
|
return len(pdev.devs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pdev *podDevices) hasPod(podUID string) bool {
|
||||||
|
_, podExists := pdev.devs[podUID]
|
||||||
|
return podExists
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pdev *podDevices) insert(podUID, contName, resource string, devices checkpoint.DevicesPerNUMA, resp *pluginapi.ContainerAllocateResponse) {
|
||||||
|
pdev.Lock()
|
||||||
|
defer pdev.Unlock()
|
||||||
|
if _, podExists := pdev.devs[podUID]; !podExists {
|
||||||
|
pdev.devs[podUID] = make(containerDevices)
|
||||||
}
|
}
|
||||||
if _, contExists := pdev[podUID][contName]; !contExists {
|
if _, contExists := pdev.devs[podUID][contName]; !contExists {
|
||||||
pdev[podUID][contName] = make(resourceAllocateInfo)
|
pdev.devs[podUID][contName] = make(resourceAllocateInfo)
|
||||||
}
|
}
|
||||||
pdev[podUID][contName][resource] = deviceAllocateInfo{
|
pdev.devs[podUID][contName][resource] = deviceAllocateInfo{
|
||||||
deviceIds: devices,
|
deviceIds: devices,
|
||||||
allocResp: resp,
|
allocResp: resp,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pdev podDevices) delete(pods []string) {
|
func (pdev *podDevices) delete(pods []string) {
|
||||||
|
pdev.Lock()
|
||||||
|
defer pdev.Unlock()
|
||||||
for _, uid := range pods {
|
for _, uid := range pods {
|
||||||
delete(pdev, uid)
|
delete(pdev.devs, uid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns list of device Ids allocated to the given container for the given resource.
|
// Returns list of device Ids allocated to the given container for the given resource.
|
||||||
// Returns nil if we don't have cached state for the given <podUID, contName, resource>.
|
// Returns nil if we don't have cached state for the given <podUID, contName, resource>.
|
||||||
func (pdev podDevices) containerDevices(podUID, contName, resource string) sets.String {
|
func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.String {
|
||||||
if _, podExists := pdev[podUID]; !podExists {
|
pdev.RLock()
|
||||||
|
defer pdev.RUnlock()
|
||||||
|
if _, podExists := pdev.devs[podUID]; !podExists {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if _, contExists := pdev[podUID][contName]; !contExists {
|
if _, contExists := pdev.devs[podUID][contName]; !contExists {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
devs, resourceExists := pdev[podUID][contName][resource]
|
devs, resourceExists := pdev.devs[podUID][contName][resource]
|
||||||
if !resourceExists {
|
if !resourceExists {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return devs.deviceIds
|
return devs.deviceIds.Devices()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Populates allocatedResources with the device resources allocated to the specified <podUID, contName>.
|
// Populates allocatedResources with the device resources allocated to the specified <podUID, contName>.
|
||||||
func (pdev podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
|
func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
|
||||||
containers, exists := pdev[podUID]
|
pdev.RLock()
|
||||||
|
defer pdev.RUnlock()
|
||||||
|
containers, exists := pdev.devs[podUID]
|
||||||
if !exists {
|
if !exists {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -91,13 +124,15 @@ func (pdev podDevices) addContainerAllocatedResources(podUID, contName string, a
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
for resource, devices := range resources {
|
for resource, devices := range resources {
|
||||||
allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds)
|
allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds.Devices())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Removes the device resources allocated to the specified <podUID, contName> from allocatedResources.
|
// Removes the device resources allocated to the specified <podUID, contName> from allocatedResources.
|
||||||
func (pdev podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
|
func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
|
||||||
containers, exists := pdev[podUID]
|
pdev.RLock()
|
||||||
|
defer pdev.RUnlock()
|
||||||
|
containers, exists := pdev.devs[podUID]
|
||||||
if !exists {
|
if !exists {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -106,21 +141,23 @@ func (pdev podDevices) removeContainerAllocatedResources(podUID, contName string
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
for resource, devices := range resources {
|
for resource, devices := range resources {
|
||||||
allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds)
|
allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds.Devices())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns all of devices allocated to the pods being tracked, keyed by resourceName.
|
// Returns all of devices allocated to the pods being tracked, keyed by resourceName.
|
||||||
func (pdev podDevices) devices() map[string]sets.String {
|
func (pdev *podDevices) devices() map[string]sets.String {
|
||||||
ret := make(map[string]sets.String)
|
ret := make(map[string]sets.String)
|
||||||
for _, containerDevices := range pdev {
|
pdev.RLock()
|
||||||
|
defer pdev.RUnlock()
|
||||||
|
for _, containerDevices := range pdev.devs {
|
||||||
for _, resources := range containerDevices {
|
for _, resources := range containerDevices {
|
||||||
for resource, devices := range resources {
|
for resource, devices := range resources {
|
||||||
if _, exists := ret[resource]; !exists {
|
if _, exists := ret[resource]; !exists {
|
||||||
ret[resource] = sets.NewString()
|
ret[resource] = sets.NewString()
|
||||||
}
|
}
|
||||||
if devices.allocResp != nil {
|
if devices.allocResp != nil {
|
||||||
ret[resource] = ret[resource].Union(devices.deviceIds)
|
ret[resource] = ret[resource].Union(devices.deviceIds.Devices())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -129,12 +166,13 @@ func (pdev podDevices) devices() map[string]sets.String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
 // Turns podDevices to checkpointData.
-func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
+func (pdev *podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
     var data []checkpoint.PodDevicesEntry
-    for podUID, containerDevices := range pdev {
+    pdev.RLock()
+    defer pdev.RUnlock()
+    for podUID, containerDevices := range pdev.devs {
         for conName, resources := range containerDevices {
             for resource, devices := range resources {
-                devIds := devices.deviceIds.UnsortedList()
                 if devices.allocResp == nil {
                     klog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource)
                     continue
@@ -149,7 +187,7 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
                     PodUID:        podUID,
                     ContainerName: conName,
                     ResourceName:  resource,
-                    DeviceIDs:     devIds,
+                    DeviceIDs:     devices.deviceIds,
                     AllocResp:     allocResp})
             }
         }
@@ -158,27 +196,26 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
 }
 // Populates podDevices from the passed in checkpointData.
-func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) {
+func (pdev *podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) {
     for _, entry := range data {
         klog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n",
             entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, entry.AllocResp)
-        devIDs := sets.NewString()
-        for _, devID := range entry.DeviceIDs {
-            devIDs.Insert(devID)
-        }
         allocResp := &pluginapi.ContainerAllocateResponse{}
         err := allocResp.Unmarshal(entry.AllocResp)
         if err != nil {
             klog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err)
             continue
         }
-        pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, devIDs, allocResp)
+        pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, allocResp)
     }
 }
 // Returns combined container runtime settings to consume the container's allocated devices.
-func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions {
+func (pdev *podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions {
-    containers, exists := pdev[podUID]
+    pdev.RLock()
+    defer pdev.RUnlock()
+
+    containers, exists := pdev.devs[podUID]
     if !exists {
         return nil
     }
@@ -274,19 +311,25 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic
 }
 // getContainerDevices returns the devices assigned to the provided container for all ResourceNames
-func (pdev podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices {
+func (pdev *podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices {
-    if _, podExists := pdev[podUID]; !podExists {
+    pdev.RLock()
+    defer pdev.RUnlock()
+
+    if _, podExists := pdev.devs[podUID]; !podExists {
         return nil
     }
-    if _, contExists := pdev[podUID][contName]; !contExists {
+    if _, contExists := pdev.devs[podUID][contName]; !contExists {
         return nil
     }
     cDev := []*podresourcesapi.ContainerDevices{}
-    for resource, allocateInfo := range pdev[podUID][contName] {
+    for resource, allocateInfo := range pdev.devs[podUID][contName] {
+        for numaid, devlist := range allocateInfo.deviceIds {
         cDev = append(cDev, &podresourcesapi.ContainerDevices{
             ResourceName: resource,
-            DeviceIds:    allocateInfo.deviceIds.UnsortedList(),
+            DeviceIds:    devlist,
+            Topology:     &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaid}}},
         })
+        }
     }
     return cDev
 }
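getContainerDevices now ranges over allocateInfo.deviceIds per NUMA node and emits one ContainerDevices entry per node, so deviceIds must have changed from a flat sets.String to a NUMA-keyed collection. The new test below constructs it as checkpoint.DevicesPerNUMA{0: ..., 1: ...}, and the .Devices() calls in the functions above still need a flat set, so a plausible shape — assumed here, not quoted from this diff — is:

// Sketch of the checkpoint.DevicesPerNUMA type implied by its uses:
// a NUMA node id -> device id list map with a set-flattening helper.
type DevicesPerNUMA map[int64][]string

// Devices flattens the per-NUMA map back into a single set, matching
// the devices.deviceIds.Devices() calls in the functions above.
func (dev DevicesPerNUMA) Devices() sets.String {
    result := sets.NewString()
    for _, devs := range dev {
        result.Insert(devs...)
    }
    return result
}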
pkg/kubelet/cm/devicemanager/pod_devices_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package devicemanager
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
+)
+
+func TestGetContainerDevices(t *testing.T) {
+    podDevices := newPodDevices()
+    resourceName1 := "domain1.com/resource1"
+    podID := "pod1"
+    contID := "con1"
+    devices := checkpoint.DevicesPerNUMA{0: []string{"dev1"}, 1: []string{"dev1"}}
+
+    podDevices.insert(podID, contID, resourceName1,
+        devices,
+        constructAllocResp(map[string]string{"/dev/r1dev1": "/dev/r1dev1", "/dev/r1dev2": "/dev/r1dev2"}, map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))
+
+    contDevices := podDevices.getContainerDevices(podID, contID)
+    require.Equal(t, len(devices), len(contDevices), "Incorrect container devices")
+    for _, contDev := range contDevices {
+        for _, node := range contDev.Topology.Nodes {
+            dev, ok := devices[node.ID]
+            require.True(t, ok, "NUMA id %v doesn't exist in result", node.ID)
+            require.Equal(t, contDev.DeviceIds[0], dev[0], "Can't find device %s in result", dev[0])
+        }
+    }
+}
@@ -385,7 +385,7 @@ func TestGetTopologyHints(t *testing.T) {
         allDevices:       make(map[string]map[string]pluginapi.Device),
         healthyDevices:   make(map[string]sets.String),
         allocatedDevices: make(map[string]sets.String),
-        podDevices:       make(podDevices),
+        podDevices:       newPodDevices(),
         sourcesReady:     &sourcesReadyStub{},
         activePods:       func() []*v1.Pod { return []*v1.Pod{pod} },
         numaNodes:        []int{0, 1},
@@ -404,7 +404,7 @@ func TestGetTopologyHints(t *testing.T) {
     for p := range tc.allocatedDevices {
         for c := range tc.allocatedDevices[p] {
             for r, devices := range tc.allocatedDevices[p][c] {
-                m.podDevices.insert(p, c, r, sets.NewString(devices...), nil)
+                m.podDevices.insert(p, c, r, constructDevices(devices), nil)
 
                 m.allocatedDevices[r] = sets.NewString()
                 for _, d := range devices {
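constructDevices replaces the old sets.NewString(devices...) argument, so it has to turn a flat []string into the NUMA-keyed structure insert now expects. The helper itself is outside this excerpt; a hypothetical version (name is from the diff, but the NUMA-assignment policy is assumed) that spreads fixture devices across the two NUMA nodes the test declares would be:

// constructDevices is called by the diff but not shown in it; this body
// is hypothetical. It builds a checkpoint.DevicesPerNUMA from a flat
// device list, alternating between NUMA nodes 0 and 1 to match the
// numaNodes: []int{0, 1} fixture above.
func constructDevices(devices []string) checkpoint.DevicesPerNUMA {
    ret := checkpoint.DevicesPerNUMA{}
    for i, dev := range devices {
        numa := int64(i % 2)
        ret[numa] = append(ret[numa], dev)
    }
    return ret
}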
@@ -739,7 +739,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
         healthyDevices:        make(map[string]sets.String),
         allocatedDevices:      make(map[string]sets.String),
         endpoints:             make(map[string]endpointInfo),
-        podDevices:            make(podDevices),
+        podDevices:            newPodDevices(),
         sourcesReady:          &sourcesReadyStub{},
         activePods:            func() []*v1.Pod { return []*v1.Pod{} },
         topologyAffinityStore: &mockAffinityStore{tc.hint},
@@ -928,7 +928,7 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
         healthyDevices:        make(map[string]sets.String),
         allocatedDevices:      make(map[string]sets.String),
         endpoints:             make(map[string]endpointInfo),
-        podDevices:            make(podDevices),
+        podDevices:            newPodDevices(),
         sourcesReady:          &sourcesReadyStub{},
         activePods:            func() []*v1.Pod { return []*v1.Pod{} },
         topologyAffinityStore: &mockAffinityStore{tc.hint},
@@ -20,7 +20,7 @@ import (
     "time"
 
     v1 "k8s.io/api/core/v1"
-    podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
+    podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
    "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
    "k8s.io/kubernetes/pkg/kubelet/config"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -2193,7 +2193,7 @@ func (kl *Kubelet) ListenAndServePodResources() {
         klog.V(2).Infof("Failed to get local endpoint for PodResources endpoint: %v", err)
         return
     }
-    server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager)
+    server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager, kl.containerManager)
 }
 
 // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
@@ -179,10 +179,10 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st
 }
 
 // ListenAndServePodResources initializes a gRPC server to serve the PodResources service
-func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider) {
+func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider, cpusProvider podresources.CPUsProvider) {
     server := grpc.NewServer()
     podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(podsProvider, devicesProvider))
-    podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider))
+    podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider, cpusProvider))
     l, err := util.CreateListener(socket)
     if err != nil {
         klog.Fatalf("Failed to create listener for podResources endpoint: %v", err)
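ListenAndServePodResources now threads a third provider into the v1 server only; the v1alpha1 registration is untouched, which keeps the old API backward compatible. The CPUsProvider interface itself is not shown in this excerpt; assuming it mirrors DevicesProvider, a sketch consistent with the repeated int64 cpu_ids field added to the proto:

// Sketch of the CPUsProvider interface implied by the new parameter;
// the kubelet passes its containerManager in this role, just as it
// already does for DevicesProvider. The exact method set is assumed.
type CPUsProvider interface {
    // GetCPUs returns the assigned CPU ids for the given container.
    GetCPUs(podUID, containerName string) []int64
}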
@@ -195,6 +195,7 @@ func (m *PodResources) GetContainers() []*ContainerResources {
 type ContainerResources struct {
     Name    string              `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
     Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"`
+    CpuIds  []int64             `protobuf:"varint,3,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"`
     XXX_NoUnkeyedLiteral struct{} `json:"-"`
     XXX_sizecache        int32    `json:"-"`
 }
@@ -245,10 +246,18 @@ func (m *ContainerResources) GetDevices() []*ContainerDevices {
     return nil
 }
 
+func (m *ContainerResources) GetCpuIds() []int64 {
+    if m != nil {
+        return m.CpuIds
+    }
+    return nil
+}
+
 // ContainerDevices contains information about the devices assigned to a container
 type ContainerDevices struct {
     ResourceName string        `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
     DeviceIds    []string      `protobuf:"bytes,2,rep,name=device_ids,json=deviceIds,proto3" json:"device_ids,omitempty"`
+    Topology     *TopologyInfo `protobuf:"bytes,3,opt,name=topology,proto3" json:"topology,omitempty"`
     XXX_NoUnkeyedLiteral struct{} `json:"-"`
     XXX_sizecache        int32    `json:"-"`
 }
@@ -299,40 +308,146 @@ func (m *ContainerDevices) GetDeviceIds() []string {
     return nil
 }
 
+func (m *ContainerDevices) GetTopology() *TopologyInfo {
+    if m != nil {
+        return m.Topology
+    }
+    return nil
+}
+
+// Topology describes hardware topology of the resource
+type TopologyInfo struct {
+    Nodes []*NUMANode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TopologyInfo) Reset()      { *m = TopologyInfo{} }
+func (*TopologyInfo) ProtoMessage() {}
+func (*TopologyInfo) Descriptor() ([]byte, []int) {
+    return fileDescriptor_00212fb1f9d3bf1c, []int{5}
+}
+func (m *TopologyInfo) XXX_Unmarshal(b []byte) error {
+    return m.Unmarshal(b)
+}
+func (m *TopologyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+    if deterministic {
+        return xxx_messageInfo_TopologyInfo.Marshal(b, m, deterministic)
+    } else {
+        b = b[:cap(b)]
+        n, err := m.MarshalToSizedBuffer(b)
+        if err != nil {
+            return nil, err
+        }
+        return b[:n], nil
+    }
+}
+func (m *TopologyInfo) XXX_Merge(src proto.Message) {
+    xxx_messageInfo_TopologyInfo.Merge(m, src)
+}
+func (m *TopologyInfo) XXX_Size() int {
+    return m.Size()
+}
+func (m *TopologyInfo) XXX_DiscardUnknown() {
+    xxx_messageInfo_TopologyInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologyInfo proto.InternalMessageInfo
+
+func (m *TopologyInfo) GetNodes() []*NUMANode {
+    if m != nil {
+        return m.Nodes
+    }
+    return nil
+}
+
+// NUMA representation of NUMA node
+type NUMANode struct {
+    ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NUMANode) Reset()      { *m = NUMANode{} }
+func (*NUMANode) ProtoMessage() {}
+func (*NUMANode) Descriptor() ([]byte, []int) {
+    return fileDescriptor_00212fb1f9d3bf1c, []int{6}
+}
+func (m *NUMANode) XXX_Unmarshal(b []byte) error {
+    return m.Unmarshal(b)
+}
+func (m *NUMANode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+    if deterministic {
+        return xxx_messageInfo_NUMANode.Marshal(b, m, deterministic)
+    } else {
+        b = b[:cap(b)]
+        n, err := m.MarshalToSizedBuffer(b)
+        if err != nil {
+            return nil, err
+        }
+        return b[:n], nil
+    }
+}
+func (m *NUMANode) XXX_Merge(src proto.Message) {
+    xxx_messageInfo_NUMANode.Merge(m, src)
+}
+func (m *NUMANode) XXX_Size() int {
+    return m.Size()
+}
+func (m *NUMANode) XXX_DiscardUnknown() {
+    xxx_messageInfo_NUMANode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NUMANode proto.InternalMessageInfo
+
+func (m *NUMANode) GetID() int64 {
+    if m != nil {
+        return m.ID
+    }
+    return 0
+}
+
 func init() {
     proto.RegisterType((*ListPodResourcesRequest)(nil), "v1.ListPodResourcesRequest")
     proto.RegisterType((*ListPodResourcesResponse)(nil), "v1.ListPodResourcesResponse")
     proto.RegisterType((*PodResources)(nil), "v1.PodResources")
     proto.RegisterType((*ContainerResources)(nil), "v1.ContainerResources")
     proto.RegisterType((*ContainerDevices)(nil), "v1.ContainerDevices")
+    proto.RegisterType((*TopologyInfo)(nil), "v1.TopologyInfo")
+    proto.RegisterType((*NUMANode)(nil), "v1.NUMANode")
 }
 
 func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) }
 
 var fileDescriptor_00212fb1f9d3bf1c = []byte{
-    // 339 bytes of a gzipped FileDescriptorProto
+    // 424 bytes of a gzipped FileDescriptorProto
-    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x4e, 0xf2, 0x40,
-    0x10, 0xc7, 0x59, 0x20, 0xdf, 0x97, 0x8e, 0x90, 0x90, 0x8d, 0xd1, 0x8a, 0xb8, 0x21, 0xeb, 0x85,
-    0x8b, 0x25, 0x60, 0xf4, 0x01, 0xc4, 0x8b, 0x89, 0x31, 0xda, 0x83, 0xd1, 0x13, 0x29, 0xed, 0x8a,
-    0x7b, 0xa0, 0xbb, 0x76, 0x5b, 0xe2, 0xd1, 0x47, 0xf0, 0xb1, 0x38, 0x7a, 0xf4, 0x28, 0xf5, 0x45,
-    0xcc, 0xee, 0xa6, 0xa1, 0x08, 0x9e, 0x3a, 0xf3, 0xff, 0xcd, 0xcc, 0xbf, 0x9d, 0x29, 0x38, 0x81,
-    0xe4, 0x9e, 0x4c, 0x44, 0x2a, 0x70, 0x75, 0x3e, 0x68, 0x9f, 0x4c, 0x79, 0xfa, 0x9c, 0x4d, 0xbc,
-    0x50, 0xcc, 0xfa, 0x53, 0x31, 0x15, 0x7d, 0x83, 0x26, 0xd9, 0x93, 0xc9, 0x4c, 0x62, 0x22, 0xdb,
-    0x42, 0x0f, 0x60, 0xff, 0x9a, 0xab, 0xf4, 0x56, 0x44, 0x3e, 0x53, 0x22, 0x4b, 0x42, 0xa6, 0x7c,
-    0xf6, 0x92, 0x31, 0x95, 0xd2, 0x3b, 0x70, 0x37, 0x91, 0x92, 0x22, 0x56, 0x0c, 0x9f, 0x41, 0x53,
-    0x8a, 0x68, 0x9c, 0x14, 0xc0, 0x45, 0xdd, 0x5a, 0x6f, 0x67, 0xd8, 0xf2, 0xe6, 0x03, 0x6f, 0xad,
-    0xa1, 0x21, 0x4b, 0x19, 0x7d, 0x85, 0x46, 0x99, 0x62, 0x0c, 0xf5, 0x38, 0x98, 0x31, 0x17, 0x75,
-    0x51, 0xcf, 0xf1, 0x4d, 0x8c, 0x3b, 0xe0, 0xe8, 0xa7, 0x92, 0x41, 0xc8, 0xdc, 0xaa, 0x01, 0x2b,
-    0x01, 0x9f, 0x03, 0x84, 0x22, 0x4e, 0x03, 0x1e, 0xb3, 0x44, 0xb9, 0x35, 0xe3, 0xba, 0xa7, 0x5d,
-    0x47, 0x85, 0xba, 0xf2, 0x2e, 0x55, 0xd2, 0x07, 0xc0, 0x9b, 0x15, 0x5b, 0xfd, 0x3d, 0xf8, 0x1f,
-    0xb1, 0x39, 0xd7, 0x1f, 0x55, 0x35, 0xe3, 0x77, 0xd7, 0xc6, 0x5f, 0x5a, 0xe6, 0x17, 0x45, 0xf4,
-    0x1e, 0x5a, 0xbf, 0x21, 0x3e, 0x86, 0x66, 0xb1, 0x9a, 0x71, 0xc9, 0xa0, 0x51, 0x88, 0x37, 0xda,
-    0xe8, 0x08, 0xc0, 0xce, 0x18, 0xf3, 0xc8, 0x7a, 0x39, 0xbe, 0x63, 0x95, 0xab, 0x48, 0x0d, 0x1f,
-    0x01, 0x97, 0x77, 0xa5, 0x4f, 0xc1, 0x12, 0x3c, 0x82, 0xba, 0x8e, 0xf0, 0xa1, 0x7e, 0xa9, 0x3f,
-    0x2e, 0xd7, 0xee, 0x6c, 0x87, 0xf6, 0x76, 0xb4, 0x72, 0xd1, 0x59, 0x2c, 0x09, 0xfa, 0x5c, 0x92,
-    0xca, 0x5b, 0x4e, 0xd0, 0x22, 0x27, 0xe8, 0x23, 0x27, 0xe8, 0x2b, 0x27, 0xe8, 0xfd, 0x9b, 0x54,
-    0x26, 0xff, 0xcc, 0x9f, 0x71, 0xfa, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x28, 0xb2, 0xaa, 0x59,
-    0x02, 0x00, 0x00,
+    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+    0x10, 0xcd, 0xda, 0xa5, 0xad, 0x07, 0x17, 0x55, 0x2b, 0x44, 0x4d, 0x08, 0x56, 0xb4, 0x5c, 0x7a,
+    0x00, 0x57, 0x0d, 0x82, 0x3b, 0x34, 0x17, 0x4b, 0x10, 0xc1, 0x0a, 0x0e, 0x9c, 0x22, 0xc7, 0xbb,
+    0x35, 0x96, 0xa8, 0x67, 0xeb, 0xb5, 0x23, 0xb8, 0x71, 0xe0, 0x03, 0xf8, 0xac, 0x1e, 0x39, 0x72,
+    0xa4, 0xe6, 0x47, 0xd0, 0xae, 0x71, 0xe3, 0x90, 0x70, 0xf2, 0xcc, 0x7b, 0x33, 0xef, 0x8d, 0x77,
+    0x06, 0xbc, 0x44, 0xe5, 0x91, 0x2a, 0xb1, 0x42, 0xea, 0x2c, 0x4f, 0x87, 0x4f, 0xb2, 0xbc, 0xfa,
+    0x58, 0x2f, 0xa2, 0x14, 0x2f, 0x4e, 0x32, 0xcc, 0xf0, 0xc4, 0x52, 0x8b, 0xfa, 0xdc, 0x66, 0x36,
+    0xb1, 0x51, 0xdb, 0xc2, 0xee, 0xc3, 0xd1, 0xab, 0x5c, 0x57, 0x6f, 0x50, 0x70, 0xa9, 0xb1, 0x2e,
+    0x53, 0xa9, 0xb9, 0xbc, 0xac, 0xa5, 0xae, 0xd8, 0x5b, 0x08, 0x36, 0x29, 0xad, 0xb0, 0xd0, 0x92,
+    0x3e, 0x83, 0x03, 0x85, 0x62, 0x5e, 0x76, 0x44, 0x40, 0xc6, 0xee, 0xf1, 0xed, 0xc9, 0x61, 0xb4,
+    0x3c, 0x8d, 0xd6, 0x1a, 0x7c, 0xd5, 0xcb, 0xd8, 0x67, 0xf0, 0xfb, 0x2c, 0xa5, 0xb0, 0x53, 0x24,
+    0x17, 0x32, 0x20, 0x63, 0x72, 0xec, 0x71, 0x1b, 0xd3, 0x11, 0x78, 0xe6, 0xab, 0x55, 0x92, 0xca,
+    0xc0, 0xb1, 0xc4, 0x0a, 0xa0, 0xcf, 0x01, 0x52, 0x2c, 0xaa, 0x24, 0x2f, 0x64, 0xa9, 0x03, 0xd7,
+    0xba, 0xde, 0x33, 0xae, 0x67, 0x1d, 0xba, 0xf2, 0xee, 0x55, 0xb2, 0x4b, 0xa0, 0x9b, 0x15, 0x5b,
+    0xfd, 0x23, 0xd8, 0x13, 0x72, 0x99, 0x9b, 0x9f, 0x72, 0xac, 0xfc, 0xdd, 0x35, 0xf9, 0x69, 0xcb,
+    0xf1, 0xae, 0x88, 0x1e, 0xc1, 0x5e, 0xaa, 0xea, 0x79, 0x2e, 0xda, 0x71, 0x5c, 0xbe, 0x9b, 0xaa,
+    0x3a, 0x16, 0x9a, 0x7d, 0x23, 0x70, 0xf8, 0x6f, 0x1b, 0x7d, 0x04, 0x07, 0xdd, 0xa3, 0xcd, 0x7b,
+    0xd6, 0x7e, 0x07, 0xce, 0xcc, 0x08, 0x0f, 0x01, 0x5a, 0x75, 0xab, 0x6a, 0xa6, 0xf0, 0xb8, 0xd7,
+    0x22, 0xb1, 0xd0, 0xf4, 0x31, 0xec, 0x57, 0xa8, 0xf0, 0x13, 0x66, 0x5f, 0x02, 0x77, 0x4c, 0xba,
+    0x77, 0x7f, 0xf7, 0x17, 0x8b, 0x8b, 0x73, 0xe4, 0x37, 0x15, 0x6c, 0x02, 0x7e, 0x9f, 0xa1, 0x0c,
+    0x6e, 0x15, 0x28, 0x6e, 0x56, 0xe6, 0x9b, 0xd6, 0xd9, 0xfb, 0xd7, 0x2f, 0x66, 0x28, 0x24, 0x6f,
+    0x29, 0x36, 0x84, 0xfd, 0x0e, 0xa2, 0x77, 0xc0, 0x89, 0xa7, 0x76, 0x4c, 0x97, 0x3b, 0xf9, 0x74,
+    0xf2, 0x01, 0x68, 0x7f, 0x87, 0xe6, 0x44, 0x64, 0x49, 0xcf, 0x60, 0xc7, 0x44, 0xf4, 0x81, 0x91,
+    0xfb, 0xcf, 0x45, 0x0d, 0x47, 0xdb, 0xc9, 0xf6, 0xa6, 0xd8, 0xe0, 0xe5, 0xe8, 0xea, 0x3a, 0x24,
+    0x3f, 0xaf, 0xc3, 0xc1, 0xd7, 0x26, 0x24, 0x57, 0x4d, 0x48, 0x7e, 0x34, 0x21, 0xf9, 0xd5, 0x84,
+    0xe4, 0xfb, 0xef, 0x70, 0xb0, 0xd8, 0xb5, 0x17, 0xfb, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff,
+    0x34, 0x5b, 0xe8, 0xc9, 0xf1, 0x02, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -546,6 +661,25 @@ func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     _ = i
     var l int
     _ = l
+    if len(m.CpuIds) > 0 {
+        dAtA2 := make([]byte, len(m.CpuIds)*10)
+        var j1 int
+        for _, num1 := range m.CpuIds {
+            num := uint64(num1)
+            for num >= 1<<7 {
+                dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80)
+                num >>= 7
+                j1++
+            }
+            dAtA2[j1] = uint8(num)
+            j1++
+        }
+        i -= j1
+        copy(dAtA[i:], dAtA2[:j1])
+        i = encodeVarintApi(dAtA, i, uint64(j1))
+        i--
+        dAtA[i] = 0x1a
+    }
     if len(m.Devices) > 0 {
         for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
             {
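The generated CpuIds marshaling is the standard packed encoding: every int64 becomes a base-128 varint, the varints are concatenated, and the run is prefixed with its byte length and the key 0x1a (field 3, wire type 2). A standalone sketch of the same scheme, writing forward instead of gogo's back-to-front buffer:

package main

import "fmt"

// appendUvarint appends x in protobuf base-128 varint form: 7 bits per
// byte, high bit set on every byte except the last.
func appendUvarint(buf []byte, x uint64) []byte {
    for x >= 1<<7 {
        buf = append(buf, byte(x&0x7f|0x80))
        x >>= 7
    }
    return append(buf, byte(x))
}

// appendPackedInt64 encodes vals as one packed repeated field:
// key byte (fieldNum<<3 | wire type 2), payload length, then the varints.
// A single key byte is enough for field numbers below 16.
func appendPackedInt64(buf []byte, fieldNum int, vals []int64) []byte {
    var payload []byte
    for _, v := range vals {
        payload = appendUvarint(payload, uint64(v))
    }
    buf = append(buf, byte(fieldNum<<3|2)) // 3<<3|2 == 0x1a, as in the diff
    buf = appendUvarint(buf, uint64(len(payload)))
    return append(buf, payload...)
}

func main() {
    // cpu_ids = [0, 3, 64] encodes as: 1a 03 00 03 40
    fmt.Printf("% x\n", appendPackedInt64(nil, 3, []int64{0, 3, 64}))
}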
@@ -590,6 +724,18 @@ func (m *ContainerDevices) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     _ = i
     var l int
     _ = l
+    if m.Topology != nil {
+        {
+            size, err := m.Topology.MarshalToSizedBuffer(dAtA[:i])
+            if err != nil {
+                return 0, err
+            }
+            i -= size
+            i = encodeVarintApi(dAtA, i, uint64(size))
+        }
+        i--
+        dAtA[i] = 0x1a
+    }
     if len(m.DeviceIds) > 0 {
         for iNdEx := len(m.DeviceIds) - 1; iNdEx >= 0; iNdEx-- {
             i -= len(m.DeviceIds[iNdEx])
@@ -609,6 +755,71 @@ func (m *ContainerDevices) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     return len(dAtA) - i, nil
 }
 
+func (m *TopologyInfo) Marshal() (dAtA []byte, err error) {
+    size := m.Size()
+    dAtA = make([]byte, size)
+    n, err := m.MarshalToSizedBuffer(dAtA[:size])
+    if err != nil {
+        return nil, err
+    }
+    return dAtA[:n], nil
+}
+
+func (m *TopologyInfo) MarshalTo(dAtA []byte) (int, error) {
+    size := m.Size()
+    return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TopologyInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+    i := len(dAtA)
+    _ = i
+    var l int
+    _ = l
+    if len(m.Nodes) > 0 {
+        for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
+            {
+                size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+                if err != nil {
+                    return 0, err
+                }
+                i -= size
+                i = encodeVarintApi(dAtA, i, uint64(size))
+            }
+            i--
+            dAtA[i] = 0xa
+        }
+    }
+    return len(dAtA) - i, nil
+}
+
+func (m *NUMANode) Marshal() (dAtA []byte, err error) {
+    size := m.Size()
+    dAtA = make([]byte, size)
+    n, err := m.MarshalToSizedBuffer(dAtA[:size])
+    if err != nil {
+        return nil, err
+    }
+    return dAtA[:n], nil
+}
+
+func (m *NUMANode) MarshalTo(dAtA []byte) (int, error) {
+    size := m.Size()
+    return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NUMANode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+    i := len(dAtA)
+    _ = i
+    var l int
+    _ = l
+    if m.ID != 0 {
+        i = encodeVarintApi(dAtA, i, uint64(m.ID))
+        i--
+        dAtA[i] = 0x8
+    }
+    return len(dAtA) - i, nil
+}
+
 func encodeVarintApi(dAtA []byte, offset int, v uint64) int {
     offset -= sovApi(v)
     base := offset
@@ -683,6 +894,13 @@ func (m *ContainerResources) Size() (n int) {
             n += 1 + l + sovApi(uint64(l))
         }
     }
+    if len(m.CpuIds) > 0 {
+        l = 0
+        for _, e := range m.CpuIds {
+            l += sovApi(uint64(e))
+        }
+        n += 1 + sovApi(uint64(l)) + l
+    }
     return n
 }
 
@@ -702,6 +920,37 @@ func (m *ContainerDevices) Size() (n int) {
             n += 1 + l + sovApi(uint64(l))
         }
     }
+    if m.Topology != nil {
+        l = m.Topology.Size()
+        n += 1 + l + sovApi(uint64(l))
+    }
+    return n
+}
+
+func (m *TopologyInfo) Size() (n int) {
+    if m == nil {
+        return 0
+    }
+    var l int
+    _ = l
+    if len(m.Nodes) > 0 {
+        for _, e := range m.Nodes {
+            l = e.Size()
+            n += 1 + l + sovApi(uint64(l))
+        }
+    }
+    return n
+}
+
+func (m *NUMANode) Size() (n int) {
+    if m == nil {
+        return 0
+    }
+    var l int
+    _ = l
+    if m.ID != 0 {
+        n += 1 + sovApi(uint64(m.ID))
+    }
     return n
 }
 
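Each Size() addition counts one key byte plus the varint-encoded payload, where sovApi is the varint length function. For reference, its usual generated form (it is defined elsewhere in this file, not in these hunks):

import "math/bits"

// sovApi returns how many bytes the varint encoding of x occupies:
// 7 payload bits per byte; x|1 keeps Len64 from returning 0 for x == 0.
func sovApi(x uint64) (n int) {
    return (bits.Len64(x|1) + 6) / 7
}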
@@ -764,6 +1013,7 @@ func (this *ContainerResources) String() string {
     s := strings.Join([]string{`&ContainerResources{`,
         `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
         `Devices:` + repeatedStringForDevices + `,`,
+        `CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`,
         `}`,
     }, "")
     return s
@@ -775,6 +1025,32 @@ func (this *ContainerDevices) String() string {
     s := strings.Join([]string{`&ContainerDevices{`,
         `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
         `DeviceIds:` + fmt.Sprintf("%v", this.DeviceIds) + `,`,
+        `Topology:` + strings.Replace(this.Topology.String(), "TopologyInfo", "TopologyInfo", 1) + `,`,
+        `}`,
+    }, "")
+    return s
+}
+
+func (this *TopologyInfo) String() string {
+    if this == nil {
+        return "nil"
+    }
+    repeatedStringForNodes := "[]*NUMANode{"
+    for _, f := range this.Nodes {
+        repeatedStringForNodes += strings.Replace(f.String(), "NUMANode", "NUMANode", 1) + ","
+    }
+    repeatedStringForNodes += "}"
+    s := strings.Join([]string{`&TopologyInfo{`,
+        `Nodes:` + repeatedStringForNodes + `,`,
+        `}`,
+    }, "")
+    return s
+}
+
+func (this *NUMANode) String() string {
+    if this == nil {
+        return "nil"
+    }
+    s := strings.Join([]string{`&NUMANode{`,
+        `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
         `}`,
     }, "")
     return s
@@ -1173,6 +1449,82 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error {
                 return err
             }
             iNdEx = postIndex
+        case 3:
+            if wireType == 0 {
+                var v int64
+                for shift := uint(0); ; shift += 7 {
+                    if shift >= 64 {
+                        return ErrIntOverflowApi
+                    }
+                    if iNdEx >= l {
+                        return io.ErrUnexpectedEOF
+                    }
+                    b := dAtA[iNdEx]
+                    iNdEx++
+                    v |= int64(b&0x7F) << shift
+                    if b < 0x80 {
+                        break
+                    }
+                }
+                m.CpuIds = append(m.CpuIds, v)
+            } else if wireType == 2 {
+                var packedLen int
+                for shift := uint(0); ; shift += 7 {
+                    if shift >= 64 {
+                        return ErrIntOverflowApi
+                    }
+                    if iNdEx >= l {
+                        return io.ErrUnexpectedEOF
+                    }
+                    b := dAtA[iNdEx]
+                    iNdEx++
+                    packedLen |= int(b&0x7F) << shift
+                    if b < 0x80 {
+                        break
+                    }
+                }
+                if packedLen < 0 {
+                    return ErrInvalidLengthApi
+                }
+                postIndex := iNdEx + packedLen
+                if postIndex < 0 {
+                    return ErrInvalidLengthApi
+                }
+                if postIndex > l {
+                    return io.ErrUnexpectedEOF
+                }
+                var elementCount int
+                var count int
+                for _, integer := range dAtA[iNdEx:postIndex] {
+                    if integer < 128 {
+                        count++
+                    }
+                }
+                elementCount = count
+                if elementCount != 0 && len(m.CpuIds) == 0 {
+                    m.CpuIds = make([]int64, 0, elementCount)
+                }
+                for iNdEx < postIndex {
+                    var v int64
+                    for shift := uint(0); ; shift += 7 {
+                        if shift >= 64 {
+                            return ErrIntOverflowApi
+                        }
+                        if iNdEx >= l {
+                            return io.ErrUnexpectedEOF
+                        }
+                        b := dAtA[iNdEx]
+                        iNdEx++
+                        v |= int64(b&0x7F) << shift
+                        if b < 0x80 {
+                            break
+                        }
+                    }
+                    m.CpuIds = append(m.CpuIds, v)
+                }
+            } else {
+                return fmt.Errorf("proto: wrong wireType = %d for field CpuIds", wireType)
+            }
         default:
             iNdEx = preIndex
             skippy, err := skipApi(dAtA[iNdEx:])
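Note the unmarshal side accepts cpu_ids on either wire type 0 (one varint per element) or wire type 2 (a packed run), as protobuf parsers must for repeated scalar fields. A compact decoder sketch for the packed case, the inverse of the encoder sketched earlier:

package main

import (
    "fmt"
    "io"
)

// readUvarint decodes one base-128 varint, returning the value and the
// bytes consumed; 0 consumed signals truncated or overlong input.
func readUvarint(buf []byte) (uint64, int) {
    var x uint64
    for i := 0; i < len(buf) && i < 10; i++ {
        b := buf[i]
        x |= uint64(b&0x7f) << (7 * uint(i))
        if b < 0x80 {
            return x, i + 1
        }
    }
    return 0, 0
}

// decodePackedInt64 walks a length-delimited payload of varints, as the
// generated CpuIds case does between iNdEx and postIndex.
func decodePackedInt64(payload []byte) ([]int64, error) {
    var out []int64
    for len(payload) > 0 {
        v, n := readUvarint(payload)
        if n == 0 {
            return nil, io.ErrUnexpectedEOF
        }
        out = append(out, int64(v))
        payload = payload[n:]
    }
    return out, nil
}

func main() {
    ids, err := decodePackedInt64([]byte{0x00, 0x03, 0x40})
    fmt.Println(ids, err) // [0 3 64] <nil>
}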
@@ -1290,6 +1642,201 @@ func (m *ContainerDevices) Unmarshal(dAtA []byte) error {
             }
             m.DeviceIds = append(m.DeviceIds, string(dAtA[iNdEx:postIndex]))
             iNdEx = postIndex
+        case 3:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowApi
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= int(b&0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthApi
+            }
+            postIndex := iNdEx + msglen
+            if postIndex < 0 {
+                return ErrInvalidLengthApi
+            }
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            if m.Topology == nil {
+                m.Topology = &TopologyInfo{}
+            }
+            if err := m.Topology.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            iNdEx = postIndex
+        default:
+            iNdEx = preIndex
+            skippy, err := skipApi(dAtA[iNdEx:])
+            if err != nil {
+                return err
+            }
+            if skippy < 0 {
+                return ErrInvalidLengthApi
+            }
+            if (iNdEx + skippy) < 0 {
+                return ErrInvalidLengthApi
+            }
+            if (iNdEx + skippy) > l {
+                return io.ErrUnexpectedEOF
+            }
+            iNdEx += skippy
+        }
+    }
+
+    if iNdEx > l {
+        return io.ErrUnexpectedEOF
+    }
+    return nil
+}
+func (m *TopologyInfo) Unmarshal(dAtA []byte) error {
+    l := len(dAtA)
+    iNdEx := 0
+    for iNdEx < l {
+        preIndex := iNdEx
+        var wire uint64
+        for shift := uint(0); ; shift += 7 {
+            if shift >= 64 {
+                return ErrIntOverflowApi
+            }
+            if iNdEx >= l {
+                return io.ErrUnexpectedEOF
+            }
+            b := dAtA[iNdEx]
+            iNdEx++
+            wire |= uint64(b&0x7F) << shift
+            if b < 0x80 {
+                break
+            }
+        }
+        fieldNum := int32(wire >> 3)
+        wireType := int(wire & 0x7)
+        if wireType == 4 {
+            return fmt.Errorf("proto: TopologyInfo: wiretype end group for non-group")
+        }
+        if fieldNum <= 0 {
+            return fmt.Errorf("proto: TopologyInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+        }
+        switch fieldNum {
+        case 1:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowApi
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= int(b&0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthApi
+            }
+            postIndex := iNdEx + msglen
+            if postIndex < 0 {
+                return ErrInvalidLengthApi
+            }
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Nodes = append(m.Nodes, &NUMANode{})
+            if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            iNdEx = postIndex
+        default:
+            iNdEx = preIndex
+            skippy, err := skipApi(dAtA[iNdEx:])
+            if err != nil {
+                return err
+            }
+            if skippy < 0 {
+                return ErrInvalidLengthApi
+            }
+            if (iNdEx + skippy) < 0 {
+                return ErrInvalidLengthApi
+            }
+            if (iNdEx + skippy) > l {
+                return io.ErrUnexpectedEOF
+            }
+            iNdEx += skippy
+        }
+    }
+
+    if iNdEx > l {
+        return io.ErrUnexpectedEOF
+    }
+    return nil
+}
+func (m *NUMANode) Unmarshal(dAtA []byte) error {
+    l := len(dAtA)
+    iNdEx := 0
+    for iNdEx < l {
+        preIndex := iNdEx
+        var wire uint64
+        for shift := uint(0); ; shift += 7 {
+            if shift >= 64 {
+                return ErrIntOverflowApi
+            }
+            if iNdEx >= l {
+                return io.ErrUnexpectedEOF
+            }
+            b := dAtA[iNdEx]
+            iNdEx++
+            wire |= uint64(b&0x7F) << shift
+            if b < 0x80 {
+                break
+            }
+        }
+        fieldNum := int32(wire >> 3)
+        wireType := int(wire & 0x7)
+        if wireType == 4 {
+            return fmt.Errorf("proto: NUMANode: wiretype end group for non-group")
+        }
+        if fieldNum <= 0 {
+            return fmt.Errorf("proto: NUMANode: illegal tag %d (wire type %d)", fieldNum, wire)
+        }
+        switch fieldNum {
+        case 1:
+            if wireType != 0 {
+                return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+            }
+            m.ID = 0
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowApi
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                m.ID |= int64(b&0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
         default:
             iNdEx = preIndex
             skippy, err := skipApi(dAtA[iNdEx:])
@@ -39,10 +39,22 @@ message PodResources {
 message ContainerResources {
     string name = 1;
     repeated ContainerDevices devices = 2;
+    repeated int64 cpu_ids = 3;
 }
 
 // ContainerDevices contains information about the devices assigned to a container
 message ContainerDevices {
     string resource_name = 1;
     repeated string device_ids = 2;
+    TopologyInfo topology = 3;
+}
+
+// Topology describes hardware topology of the resource
+message TopologyInfo {
+    repeated NUMANode nodes = 1;
+}
+
+// NUMA representation of NUMA node
+message NUMANode {
+    int64 ID = 1;
 }
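With the proto additions in place, a monitoring agent can read both new fields over the v1 endpoint. A client sketch, assuming the conventional kubelet pod-resources socket path (/var/lib/kubelet/pod-resources/kubelet.sock — deployments may differ) and the published k8s.io/kubelet/pkg/apis/podresources/v1 package:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "google.golang.org/grpc"
    podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
    // gRPC resolves the unix scheme natively; no custom dialer needed.
    conn, err := grpc.Dial("unix:///var/lib/kubelet/pod-resources/kubelet.sock", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    resp, err := podresourcesapi.NewPodResourcesListerClient(conn).List(ctx, &podresourcesapi.ListPodResourcesRequest{})
    if err != nil {
        log.Fatalf("list: %v", err)
    }
    for _, pod := range resp.GetPodResources() {
        for _, ctr := range pod.GetContainers() {
            fmt.Printf("%s/%s cpu_ids=%v\n", pod.GetName(), ctr.GetName(), ctr.GetCpuIds())
            for _, dev := range ctr.GetDevices() {
                // GetTopology and GetNodes are nil-safe, so resources
                // without NUMA affinity simply report no nodes.
                var numa []int64
                for _, node := range dev.GetTopology().GetNodes() {
                    numa = append(numa, node.GetID())
                }
                fmt.Printf("  %s %v numa=%v\n", dev.GetResourceName(), dev.GetDeviceIds(), numa)
            }
        }
    }
}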