Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-14 14:23:37 +00:00)
kubelet dra: restore skipping of unused resource claims
1aeec10efb
removed iterating over containers in favor of iterating over pod
claims. This had the unintended consequence that NodePrepareResource gets
called unnecessarily when no container needs the claim. The more natural
behavior is to skip unused resources. This enables (theoretical, at this time)
use cases where some DRA driver relies on the controller part to influence
scheduling, but then doesn't use CDI with containers.
parent 874daa8b52
commit bde66bfb55
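For illustration only (not part of this commit): a minimal, self-contained Go sketch of the scenario the commit message describes, where a pod lists a resource claim that no container references in its resources.claims, so the kubelet can skip NodePrepareResource for it. The helper name usedByAnyContainer and the pod contents are hypothetical; the check merely mirrors the claimIsUsedByPod/claimIsUsedByContainer logic added in the diff below. It assumes k8s.io/api/core/v1 from a DRA-enabled release.

// Hypothetical standalone sketch (not from the commit): it reimplements the
// "is this claim used by any container?" check to show which pod-level claims
// would now be skipped.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// usedByAnyContainer reports whether any init container or regular container
// references the pod-level claim by name in its resources.claims list.
func usedByAnyContainer(podClaim v1.PodResourceClaim, pod *v1.Pod) bool {
	for _, containers := range [][]v1.Container{pod.Spec.InitContainers, pod.Spec.Containers} {
		for _, c := range containers {
			for _, ref := range c.Resources.Claims {
				if ref.Name == podClaim.Name {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			// Two pod-level claims; only "gpu" is referenced by a container.
			// "scheduling-only" previously still triggered NodePrepareResource.
			ResourceClaims: []v1.PodResourceClaim{{Name: "gpu"}, {Name: "scheduling-only"}},
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Claims: []v1.ResourceClaim{{Name: "gpu"}},
				},
			}},
		},
	}
	for _, pc := range pod.Spec.ResourceClaims {
		fmt.Printf("claim %q used by a container: %v\n", pc.Name, usedByAnyContainer(pc, pod))
	}
	// Prints:
	// claim "gpu" used by a container: true
	// claim "scheduling-only" used by a container: false
}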
@@ -67,7 +67,8 @@ func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string) (
 // containerResources on success.
 func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 	for i := range pod.Spec.ResourceClaims {
-		claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
+		podClaim := &pod.Spec.ResourceClaims[i]
+		claimName := resourceclaim.Name(pod, podClaim)
 		klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name)
 
 		// Query claim object from the API server
@@ -85,6 +86,13 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 				pod.Name, pod.UID, claimName, resourceClaim.UID)
 		}
 
+		// If no container actually uses the claim, then we don't need
+		// to prepare it.
+		if !claimIsUsedByPod(podClaim, pod) {
+			klog.V(5).InfoS("Skipping unused resource", "claim", claimName, "pod", pod.Name)
+			continue
+		}
+
 		// Is the resource already prepared? Then add the pod UID to it.
 		if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil {
 			// We delay checkpointing of this change until this call
@@ -178,6 +186,34 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 	return nil
 }
 
+func claimIsUsedByPod(podClaim *v1.PodResourceClaim, pod *v1.Pod) bool {
+	if claimIsUsedByContainers(podClaim, pod.Spec.InitContainers) {
+		return true
+	}
+	if claimIsUsedByContainers(podClaim, pod.Spec.Containers) {
+		return true
+	}
+	return false
+}
+
+func claimIsUsedByContainers(podClaim *v1.PodResourceClaim, containers []v1.Container) bool {
+	for i := range containers {
+		if claimIsUsedByContainer(podClaim, &containers[i]) {
+			return true
+		}
+	}
+	return false
+}
+
+func claimIsUsedByContainer(podClaim *v1.PodResourceClaim, container *v1.Container) bool {
+	for _, c := range container.Resources.Claims {
+		if c.Name == podClaim.Name {
+			return true
+		}
+	}
+	return false
+}
+
 // GetResources gets a ContainerInfo object from the claimInfo cache.
 // This information is used by the caller to update a container config.
 func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) {
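A possible in-package test sketch for the helpers added above; the package name, file placement, and test cases are assumptions for illustration and are not part of this commit.

// Hypothetical test sketch exercising claimIsUsedByPod, assuming the helpers
// live in a package named dra alongside the manager code.
package dra

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

func TestClaimIsUsedByPod(t *testing.T) {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			// "used" is referenced by an init container, "unused" by nothing.
			ResourceClaims: []v1.PodResourceClaim{{Name: "used"}, {Name: "unused"}},
			InitContainers: []v1.Container{{
				Name: "init",
				Resources: v1.ResourceRequirements{
					Claims: []v1.ResourceClaim{{Name: "used"}},
				},
			}},
		},
	}
	if !claimIsUsedByPod(&pod.Spec.ResourceClaims[0], pod) {
		t.Error("expected claim \"used\" to be reported as used")
	}
	if claimIsUsedByPod(&pod.Spec.ResourceClaims[1], pod) {
		t.Error("expected claim \"unused\" to be reported as unused")
	}
}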