Device plugin code refactoring to cope with file move.
After moving device_plugin_handler_test.go from pkg/kubelet/cm/ to pkg/kubelet/cm/deviceplugin/, its tests can no longer use cm because that would create a dependency cycle. To solve this problem, I moved the main cm GetResources functionality, along with part of the current device plugin handler Allocate functionality, into a new device plugin handler function, GetDeviceRunContainerOptions(). This refactoring is also needed by PR 51895, which moves device allocation into the admission phase.

The device plugin handler's Allocate() now first checks whether cached device runtime state exists and only issues an Allocate gRPC call when no cached state is available. The new GetDeviceRunContainerOptions() function simply returns the device runtime config from the cached state. To support this change, the podDevices struct and the checkpoint data structure were extended with device runtime state.
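For orientation, here is a minimal sketch of the handler surface this change implies. It is an assumption pieced together from the commit message and the diff below, not the exact pkg/kubelet/cm/deviceplugin source; in particular, the DeviceRunContainerOptions field types are inferred from how GetResources consumes them.

// Sketch only: inferred from this commit's description and diff, not
// copied from the real pkg/kubelet/cm/deviceplugin sources.
package deviceplugin

import (
	"k8s.io/api/core/v1"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// DeviceRunContainerOptions carries the per-container artifacts produced
// by device plugin Allocate responses: device files, mounts, and envs.
type DeviceRunContainerOptions struct {
	Devices []kubecontainer.DeviceInfo
	Mounts  []kubecontainer.Mount
	Envs    []kubecontainer.EnvVar
}

// Handler is the surface the container manager programs against.
type Handler interface {
	// Allocate consults the cached device runtime state and issues
	// Allocate gRPC calls to plugins only when no cached state exists.
	Allocate(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) error
	// GetDeviceRunContainerOptions returns the device runtime config
	// recorded by a previous Allocate, or nil when the container needs
	// no plugin-provided resources.
	GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions
}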
@@ -45,6 +45,7 @@ import (
 	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
+	"k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin"
 	cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
@@ -125,7 +126,7 @@ type containerManagerImpl struct {
 	// Interface for QoS cgroup management
 	qosContainerManager QOSContainerManager
 	// Interface for exporting and allocating devices reported by device plugins.
-	devicePluginHandler DevicePluginHandler
+	devicePluginHandler deviceplugin.Handler
 	// Interface for CPU affinity management.
 	cpuManager cpumanager.Manager
 }
@@ -273,9 +274,9 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 
 	glog.Infof("Creating device plugin handler: %t", devicePluginEnabled)
 	if devicePluginEnabled {
-		cm.devicePluginHandler, err = NewDevicePluginHandlerImpl(updateDeviceCapacityFunc)
+		cm.devicePluginHandler, err = deviceplugin.NewHandlerImpl(updateDeviceCapacityFunc)
 	} else {
-		cm.devicePluginHandler, err = NewDevicePluginHandlerStub()
+		cm.devicePluginHandler, err = deviceplugin.NewHandlerStub()
 	}
 	if err != nil {
 		return nil, err
@@ -618,72 +619,17 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
 	opts := &kubecontainer.RunContainerOptions{}
 	// Gets devices, mounts, and envs from device plugin handler.
 	glog.V(3).Infof("Calling devicePluginHandler AllocateDevices")
-	// Maps to detect duplicate settings.
-	devsMap := make(map[string]string)
-	mountsMap := make(map[string]string)
-	envsMap := make(map[string]string)
-	allocResps, err := cm.devicePluginHandler.Allocate(pod, container, activePods)
+	err := cm.devicePluginHandler.Allocate(pod, container, activePods)
 	if err != nil {
 		return opts, err
 	}
-	// Loops through AllocationResponses of all required extended resources.
-	for _, resp := range allocResps {
-		// Each Allocate response has the following artifacts.
-		// Environment variables
-		// Mount points
-		// Device files
-		// These artifacts are per resource per container.
-		// Updates RunContainerOptions.Envs.
-		for k, v := range resp.Envs {
-			if e, ok := envsMap[k]; ok {
-				glog.V(3).Infof("skip existing envs %s %s", k, v)
-				if e != v {
-					glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v)
-				}
-				continue
-			}
-			envsMap[k] = v
-			opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v})
-		}
-
-		// Updates RunContainerOptions.Devices.
-		for _, dev := range resp.Devices {
-			if d, ok := devsMap[dev.ContainerPath]; ok {
-				glog.V(3).Infof("skip existing device %s %s", dev.ContainerPath, dev.HostPath)
-				if d != dev.HostPath {
-					glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s",
-						dev.ContainerPath, d, dev.HostPath)
-				}
-				continue
-			}
-			devsMap[dev.ContainerPath] = dev.HostPath
-			opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{
-				PathOnHost:      dev.HostPath,
-				PathInContainer: dev.ContainerPath,
-				Permissions:     dev.Permissions,
-			})
-		}
-		// Updates RunContainerOptions.Mounts.
-		for _, mount := range resp.Mounts {
-			if m, ok := mountsMap[mount.ContainerPath]; ok {
-				glog.V(3).Infof("skip existing mount %s %s", mount.ContainerPath, mount.HostPath)
-				if m != mount.HostPath {
-					glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s",
-						mount.ContainerPath, m, mount.HostPath)
-				}
-				continue
-			}
-			mountsMap[mount.ContainerPath] = mount.HostPath
-			opts.Mounts = append(opts.Mounts, kubecontainer.Mount{
-				Name:          mount.ContainerPath,
-				ContainerPath: mount.ContainerPath,
-				HostPath:      mount.HostPath,
-				ReadOnly:      mount.ReadOnly,
-				// TODO: This may need to be part of Device plugin API.
-				SELinuxRelabel: false,
-			})
-		}
-	}
+	devOpts := cm.devicePluginHandler.GetDeviceRunContainerOptions(pod, container)
+	if devOpts == nil {
+		return opts, nil
+	}
+	opts.Devices = append(opts.Devices, devOpts.Devices...)
+	opts.Mounts = append(opts.Mounts, devOpts.Mounts...)
+	opts.Envs = append(opts.Envs, devOpts.Envs...)
 	return opts, nil
 }
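The caching behavior described above (Allocate records runtime state, later calls replay it) can be illustrated with a small self-contained sketch. Everything here is a hypothetical stand-in for the real podDevices bookkeeping and checkpoint format, which this commit extends but which is not shown in this hunk.

// Toy model of the allocate-then-cache pattern: the first call for a
// pod/container pair simulates the plugin round trip and records the
// result; subsequent calls are served from the cache.
package main

import "fmt"

type key struct{ podUID, container string }

// state stands in for a device plugin AllocateResponse (envs, mounts,
// device files) kept in podDevices and persisted to the checkpoint file.
type state struct{ envs map[string]string }

type podDevices struct {
	cache map[key]state
	calls int // counts simulated gRPC round trips
}

func (p *podDevices) allocate(podUID, container string) state {
	k := key{podUID, container}
	if st, ok := p.cache[k]; ok {
		return st // cached runtime state: skip the plugin call
	}
	p.calls++
	st := state{envs: map[string]string{"EXAMPLE_DEVICE": "dev0"}}
	p.cache[k] = st
	return st
}

func main() {
	p := &podDevices{cache: map[key]state{}}
	p.allocate("pod-1", "ctr")
	p.allocate("pod-1", "ctr") // second call hits the cache
	fmt.Println("gRPC calls issued:", p.calls) // prints 1
}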