kubelet dra: fix checking of second pod which uses a claim
When a second pod wanted to use a claim, the obligatory sanity check whether the pod is really allowed to use the claim ("reserved for") was skipped.
commit 874daa8b52
parent ec70b2ec80
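
The fix is purely a reordering inside PrepareResources: the fast path that reuses an already prepared claim used to run before the claim was fetched from the API server and checked against its "reserved for" list, so only the first pod ever reached that check. The toy Go sketch below models the corrected control flow; all types and names in it (claim, claimInfo, cache, prepare) are illustrative stand-ins, not the kubelet's actual code.

// Toy model of the ordering fixed by this commit (illustrative names only,
// not the kubelet's own types): the "reserved for" sanity check must run
// before the prepared-claim cache is consulted, otherwise a second pod that
// shares an already prepared claim skips the check entirely.
package main

import "fmt"

type claim struct {
	name        string
	reservedFor []string // UIDs of pods allowed to use the claim
}

// claimInfo stands in for the kubelet's cached record of a prepared claim.
type claimInfo struct {
	podUIDs map[string]bool
}

type cache map[string]*claimInfo

// prepare mirrors the corrected flow: reservation check first, cache fast path second.
func prepare(c cache, cl claim, podName, podUID string) error {
	// 1. Sanity check: is this pod listed in the claim's "reserved for" list?
	reserved := false
	for _, uid := range cl.reservedFor {
		if uid == podUID {
			reserved = true
			break
		}
	}
	if !reserved {
		return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s", podName, podUID, cl.name)
	}

	// 2. Only now reuse an already prepared claim, if one is cached.
	if info, ok := c[cl.name]; ok {
		info.podUIDs[podUID] = true
		return nil
	}

	// 3. Otherwise the claim would be prepared via the DRA driver (omitted)
	//    and recorded in the cache.
	c[cl.name] = &claimInfo{podUIDs: map[string]bool{podUID: true}}
	return nil
}

func main() {
	c := cache{}
	shared := claim{name: "shared-claim", reservedFor: []string{"uid-1"}}

	fmt.Println(prepare(c, shared, "pod-1", "uid-1")) // <nil>: first pod is reserved
	fmt.Println(prepare(c, shared, "pod-2", "uid-2")) // error: second pod is rejected even though the claim is cached
}

With the original ordering, the second call would have taken the cache fast path and succeeded; with the reordering it fails the reservation check first. The diff below applies that same reordering to the real code.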
@@ -70,20 +70,6 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 		claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
 		klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name)
 
-		// Resource is already prepared, add pod UID to it
-		if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil {
-			// We delay checkpointing of this change until this call
-			// returns successfully. It is OK to do this because we
-			// will only return successfully from this call if the
-			// checkpoint has succeeded. That means if the kubelet is
-			// ever restarted before this checkpoint succeeds, the pod
-			// whose resources are being prepared would never have
-			// started, so it's OK (actually correct) to not include it
-			// in the cache.
-			claimInfo.addPodReference(pod.UID)
-			continue
-		}
-
 		// Query claim object from the API server
 		resourceClaim, err := m.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get(
 			context.TODO(),
@@ -99,6 +85,20 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
 				pod.Name, pod.UID, claimName, resourceClaim.UID)
 		}
 
+		// Is the resource already prepared? Then add the pod UID to it.
+		if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil {
+			// We delay checkpointing of this change until this call
+			// returns successfully. It is OK to do this because we
+			// will only return successfully from this call if the
+			// checkpoint has succeeded. That means if the kubelet is
+			// ever restarted before this checkpoint succeeds, the pod
+			// whose resources are being prepared would never have
+			// started, so it's OK (actually correct) to not include it
+			// in the cache.
+			claimInfo.addPodReference(pod.UID)
+			continue
+		}
+
 		// Grab the allocation.resourceHandles. If there are no
 		// allocation.resourceHandles, create a single resourceHandle with no
 		// content. This will trigger processing of this claim by a single