kubernetes/pkg/kubelet/cm/dra/manager.go
Moshe Levi e7256e08d3 kubelet dra: add checkpointing mechanism in the DRA Manager
The checkpointing mechanism will repopulate the DRA Manager's in-memory cache on kubelet restart.
This ensures that the information needed by the PodResources API is available across
a kubelet restart.

The ClaimInfoState struct represents the DRA Manager's in-memory cache state in the checkpoint.
It is embedded in ClaimInfo, which also includes the annotations field. The in-memory cache and
the checkpointed cache state are kept separate so that the checkpoint format is not tied to the
in-memory cache struct, which may change in the future. ClaimInfoState stores only the minimal
fields required to restore the in-memory cache (a rough sketch of this split follows the commit
metadata below).

Signed-off-by: Moshe Levi <moshele@nvidia.com>
2023-03-10 12:22:15 +02:00
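
A rough sketch of the ClaimInfoState / ClaimInfo split described above. This is an illustration,
not part of this file: the field names are inferred from how manager.go uses them, and the
authoritative definitions live in the DRA manager's state and claim-info sources.

package dra

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// ClaimInfoState is the checkpointed part of the cache: only the minimal
// fields needed to repopulate the in-memory cache after a kubelet restart.
// (Sketch; the real struct lives in the DRA state package.)
type ClaimInfoState struct {
	DriverName string
	ClaimUID   types.UID
	ClaimName  string
	Namespace  string
	PodUIDs    sets.Set[string]
	CdiDevices []string
}

// ClaimInfo is the in-memory cache entry. It embeds the checkpointed state
// and adds data that can be regenerated on restart (the CDI annotations),
// which therefore does not need to be persisted.
type ClaimInfo struct {
	ClaimInfoState
	annotations []kubecontainer.Annotation
}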


/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dra

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/dynamic-resource-allocation/resourceclaim"
	"k8s.io/klog/v2"
	dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// draManagerStateFileName is the file name where dra manager stores its state
const draManagerStateFileName = "dra_manager_state"

// ManagerImpl is the structure in charge of managing DRA resource Plugins.
type ManagerImpl struct {
	// cache contains cached claim info
	cache *claimInfoCache

	// KubeClient reference
	kubeClient clientset.Interface
}

// NewManagerImpl creates a new manager.
func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string) (*ManagerImpl, error) {
	klog.V(2).InfoS("Creating DRA manager")

	claimInfoCache, err := newClaimInfoCache(stateFileDirectory, draManagerStateFileName)
	if err != nil {
		return nil, fmt.Errorf("failed to create claimInfo cache: %+v", err)
	}

	manager := &ManagerImpl{
		cache:      claimInfoCache,
		kubeClient: kubeClient,
	}

	return manager, nil
}

// PrepareResources attempts to prepare all of the required resource
// plugin resources for the input pod, issues a NodePrepareResource RPC request
// for each new resource requirement, processes their responses, and updates the cached
// containerResources on success.
func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
	// Process resources for each resource claim referenced by container
	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
		for range container.Resources.Claims {
			for i, podResourceClaim := range pod.Spec.ResourceClaims {
				claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
				klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name)

				// Resource is already prepared, add pod UID to it
				if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil {
					// We delay checkpointing of this change until this call returns successfully.
					// It is OK to do this because we will only return successfully from this call if
					// the checkpoint has succeeded. That means if the kubelet is ever restarted
					// before this checkpoint succeeds, the pod whose resources are being prepared
					// would never have started, so it's OK (actually correct) to not include it in the cache.
					claimInfo.addPodReference(pod.UID)
					continue
				}

				// Query claim object from the API server
				resourceClaim, err := m.kubeClient.ResourceV1alpha1().ResourceClaims(pod.Namespace).Get(
					context.TODO(),
					claimName,
					metav1.GetOptions{})
				if err != nil {
					return fmt.Errorf("failed to fetch ResourceClaim %s referenced by pod %s: %+v", claimName, pod.Name, err)
				}

				// Check if pod is in the ReservedFor for the claim
				if !resourceclaim.IsReservedForPod(pod, resourceClaim) {
					return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s(%s)",
						pod.Name, pod.UID, podResourceClaim.Name, resourceClaim.UID)
				}

				// Call NodePrepareResource RPC
				driverName := resourceClaim.Status.DriverName
				client, err := dra.NewDRAPluginClient(driverName)
				if err != nil {
					return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", driverName, err)
				}

				response, err := client.NodePrepareResource(
					context.Background(),
					resourceClaim.Namespace,
					resourceClaim.UID,
					resourceClaim.Name,
					resourceClaim.Status.Allocation.ResourceHandle)
				if err != nil {
					return fmt.Errorf("NodePrepareResource failed, claim UID: %s, claim name: %s, resource handle: %s, err: %+v",
						resourceClaim.UID, resourceClaim.Name, resourceClaim.Status.Allocation.ResourceHandle, err)
				}
				klog.V(3).InfoS("NodePrepareResource succeeded", "response", response)

				// TODO: We are adding the claimInfo struct to the cache and syncing it to the checkpoint *after* the NodePrepareResource
				// call has completed. This will cause issues if the kubelet gets restarted between NodePrepareResource and syncToCheckpoint.
				// It will result in not calling NodeUnprepareResource for this claim because no claimInfo will be synced back to the cache
				// for it after the restart. We need to resolve this issue before moving to beta.
				claimInfo, err := newClaimInfo(
					driverName,
					resourceClaim.UID,
					resourceClaim.Name,
					resourceClaim.Namespace,
					sets.New(string(pod.UID)),
					response.CdiDevices)
				if err != nil {
					return fmt.Errorf("newClaimInfo failed, claim UID: %s, claim name: %s, claim namespace: %s, err: %+v",
						resourceClaim.UID, resourceClaim.Name, resourceClaim.Namespace, err)
				}
				m.cache.add(claimInfo)

				// Checkpoint to reduce redundant calls to NodePrepareResource() after a kubelet restart.
				err = m.cache.syncToCheckpoint()
				if err != nil {
					return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err)
				}
			}
		}
	}

	// Checkpoint to capture all of the previous addPodReference() calls.
	err := m.cache.syncToCheckpoint()
	if err != nil {
		return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err)
	}

	return nil
}

// GetResources gets a ContainerInfo object from the claimInfo cache.
// This information is used by the caller to update a container config.
func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) {
	annotations := []kubecontainer.Annotation{}
	cdiDevices := []kubecontainer.CDIDevice{}

	for i, podResourceClaim := range pod.Spec.ResourceClaims {
		claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])

		for _, claim := range container.Resources.Claims {
			if podResourceClaim.Name != claim.Name {
				continue
			}

			claimInfo := m.cache.get(claimName, pod.Namespace)
			if claimInfo == nil {
				return nil, fmt.Errorf("unable to get resource for namespace: %s, claim: %s", pod.Namespace, claimName)
			}

			klog.V(3).InfoS("Add resource annotations", "claim", claimName, "annotations", claimInfo.annotations)
			annotations = append(annotations, claimInfo.annotations...)

			for _, cdiDevice := range claimInfo.CdiDevices {
				cdiDevices = append(cdiDevices, kubecontainer.CDIDevice{Name: cdiDevice})
			}
		}
	}

	return &ContainerInfo{Annotations: annotations, CDIDevices: cdiDevices}, nil
}

// UnprepareResources calls a plugin's NodeUnprepareResource API for each resource claim owned by a pod.
// This function is idempotent and may be called multiple times against the same pod.
// As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have
// already been successfully unprepared.
func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error {
	// Call NodeUnprepareResource RPC for every resource claim referenced by the pod
	for i := range pod.Spec.ResourceClaims {
		claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
		claimInfo := m.cache.get(claimName, pod.Namespace)

		// Skip calling NodeUnprepareResource if claim info is not cached
		if claimInfo == nil {
			continue
		}

		// Skip calling NodeUnprepareResource if other pods are still referencing it
		if len(claimInfo.PodUIDs) > 1 {
			// We delay checkpointing of this change until this call returns successfully.
			// It is OK to do this because we will only return successfully from this call if
			// the checkpoint has succeeded. That means if the kubelet is ever restarted
			// before this checkpoint succeeds, we will simply call into this (idempotent)
			// function again.
			claimInfo.deletePodReference(pod.UID)
			continue
		}

		// Call NodeUnprepareResource only for the last pod that references the claim
		client, err := dra.NewDRAPluginClient(claimInfo.DriverName)
		if err != nil {
			return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", claimInfo.DriverName, err)
		}

		response, err := client.NodeUnprepareResource(
			context.Background(),
			claimInfo.Namespace,
			claimInfo.ClaimUID,
			claimInfo.ClaimName,
			claimInfo.CdiDevices)
		if err != nil {
			return fmt.Errorf(
				"NodeUnprepareResource failed, pod: %s, claim UID: %s, claim name: %s, CDI devices: %s, err: %+v",
				pod.Name, claimInfo.ClaimUID, claimInfo.ClaimName, claimInfo.CdiDevices, err)
		}
		klog.V(3).InfoS("NodeUnprepareResource succeeded", "response", response)

		// Delete the last pod UID only if the NodeUnprepareResource call succeeds.
		// This ensures that the status manager doesn't enter termination status
		// for the pod. This logic is implemented in m.PodMightNeedToUnprepareResources
		// and in claimInfo.hasPodReference.
		claimInfo.deletePodReference(pod.UID)
		m.cache.delete(claimInfo.ClaimName, pod.Namespace)

		// Checkpoint to reduce redundant calls to NodeUnprepareResource() after a kubelet restart.
		err = m.cache.syncToCheckpoint()
		if err != nil {
			return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err)
		}
	}

	// Checkpoint to capture all of the previous deletePodReference() calls.
	err := m.cache.syncToCheckpoint()
	if err != nil {
		return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err)
	}

	return nil
}

// PodMightNeedToUnprepareResources returns true if the pod might need to
// unprepare resources
func (m *ManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
	return m.cache.hasPodReference(UID)
}