Fix race when two provisioners create two PVs for a single claim.
parent 18de83c641
commit 2224e80dd7
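
To make the scenario concrete before the diff: two provisioners each create a PV for the same claim, the claim ends up bound to exactly one of them, and the other PV has "lost the race" and must be reclaimed. Below is a minimal, self-contained sketch of that loser check; the pv/pvc structs and the lostRace helper are simplified illustrations, not the controller's actual types.

// Illustrative sketch only; simplified stand-ins for the controller's types.
package main

import "fmt"

type pv struct {
	name     string
	claimUID string // UID of the claim this PV was provisioned for
}

type pvc struct {
	uid        string
	volumeName string // name of the PV the claim actually bound to
}

// lostRace reports whether a dynamically provisioned PV should be reclaimed:
// its claim still exists, but the claim is bound to a different volume.
func lostRace(v pv, claim pvc) bool {
	return claim.uid == v.claimUID && claim.volumeName != "" && claim.volumeName != v.name
}

func main() {
	claim := pvc{uid: "uid8-11", volumeName: "volume8-11-2"}
	for _, v := range []pv{
		{name: "volume8-11-1", claimUID: "uid8-11"},
		{name: "volume8-11-2", claimUID: "uid8-11"},
	} {
		fmt.Printf("%s lost the race: %v\n", v.name, lostRace(v, claim))
	}
}
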
@@ -148,6 +148,49 @@ func TestDeleteSync(t *testing.T) {
				return testSyncVolume(ctrl, reactor, test)
			},
		},
		{
			// delete success - two PVs are provisioned for a single claim.
			// One of the PVs is deleted.
			"8-11 - two PVs provisioned for a single claim",
			[]*api.PersistentVolume{
				newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
				newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
			},
			[]*api.PersistentVolume{
				newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
			},
			// the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted
			newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound),
			newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound),
			noevents, noerrors,
			// Inject deleter into the controller and call syncVolume. The
			// deleter simulates one delete() call that succeeds.
			wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
		},
		{
			// delete success - two PVs are externally provisioned for a single
			// claim. One of the PVs is marked as Released to be deleted by the
			// external provisioner.
			"8-12 - two PVs externally provisioned for a single claim",
			[]*api.PersistentVolume{
				newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
				newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
			},
			[]*api.PersistentVolume{
				newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeReleased, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
				newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
			},
			// the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released"
			newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound),
			newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound),
			noevents, noerrors,
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				// Inject external deleter annotation
				test.initialVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
				test.expectedVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
				return testSyncVolume(ctrl, reactor, test)
			},
		},
	}
	runSyncTests(t, tests, []*storage.StorageClass{})
}

@@ -492,6 +492,17 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
			// This volume was dynamically provisioned for this claim. The
			// claim got bound elsewhere, and thus this volume is not
			// needed. Delete it.
			// Mark the volume as Released for external deleters and to let
			// the user know. Don't overwrite existing Failed status!
			if volume.Status.Phase != api.VolumeReleased && volume.Status.Phase != api.VolumeFailed {
				// Also, log this only once:
				glog.V(2).Infof("dynamically provisioned volume %q is released and it will be deleted", volume.Name)
				if volume, err = ctrl.updateVolumePhase(volume, api.VolumeReleased, ""); err != nil {
					// Nothing was saved; we will fall back into the same condition
					// in the next call to this method
					return err
				}
			}
			if err = ctrl.reclaimVolume(volume); err != nil {
				// Deletion failed, we will fall back into the same condition
				// in the next call to this method

@@ -1131,6 +1142,12 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV
	}
	if claim != nil && claim.UID == volume.Spec.ClaimRef.UID {
		// the claim still exists and has the right UID

		if len(claim.Spec.VolumeName) > 0 && claim.Spec.VolumeName != volume.Name {
			// the claim is bound to another PV, this PV *is* released
			return true, nil
		}

		glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name)
		return false, nil
	}
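
The "8-12" test case above covers the externally provisioned variant: the controller only marks the losing PV as Released and leaves the actual deletion to whatever component set the annDynamicallyProvisioned annotation. A minimal sketch of the check such an external deleter might perform follows; the shouldDelete helper, the plain volume struct, and the annotation key value are illustrative assumptions, not the Kubernetes API.

// Illustrative sketch only; not part of this commit.
package main

import "fmt"

// Assumed value of the annDynamicallyProvisioned annotation key used above.
const annProvisionedBy = "pv.kubernetes.io/provisioned-by"

type volume struct {
	name        string
	phase       string // e.g. "Bound", "Released", "Failed"
	annotations map[string]string
}

// shouldDelete mimics an external deleter's decision: it only cleans up
// volumes it provisioned itself and that the controller has marked Released.
func shouldDelete(v volume, myProvisioner string) bool {
	return v.phase == "Released" && v.annotations[annProvisionedBy] == myProvisioner
}

func main() {
	loser := volume{
		name:        "volume8-12-1",
		phase:       "Released",
		annotations: map[string]string{annProvisionedBy: "external.io/test"},
	}
	fmt.Println(shouldDelete(loser, "external.io/test")) // true: this deleter owns the released PV
}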