Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #18877 from markturansky/fix_18830
Trigger syncClaim after PV provisioning to reduce wait
Commit: 12845ba9a5
@@ -226,7 +226,7 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 		nextPhase = api.VolumeAvailable
 
 		if volume.Spec.ClaimRef != nil {
-			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
+			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
 			if errors.IsNotFound(err) {
 				// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
 				// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
@@ -250,6 +250,14 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 			} else if err != nil {
 				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
 			}
+
+			// Dynamically provisioned claims remain Pending until its volume is completely provisioned.
+			// The provisioner updates the PV and triggers this update for the volume. Explicitly sync'ing
+			// the claim here prevents the need to wait until the next sync period when the claim would normally
+			// advance to Bound phase. Otherwise, the maximum wait time for the claim to be Bound is the default sync period.
+			if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) {
+				syncClaim(volumeIndex, binderClient, claim)
+			}
 		}
 		glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)
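For context, the new condition leans on two helpers that are not part of this diff, keyExists and isProvisioningComplete. The following is a minimal sketch of what they are assumed to check, inferred only from the call sites above and from the annotation the test below places on the PV (pvProvisioningRequiredAnnotationKey set to pvProvisioningCompletedAnnotationValue); it is not the upstream implementation.

// Sketch only, assuming the binder package's annotation constants and the api import are in scope.

// keyExists is assumed to be a plain map-key presence check, matching the call
// keyExists(qosProvisioningKey, claim.Annotations) in the condition above.
func keyExists(key string, haystack map[string]string) bool {
	_, exists := haystack[key]
	return exists
}

// isProvisioningComplete is assumed to report whether the provisioner has stamped the PV
// with pvProvisioningRequiredAnnotationKey = pvProvisioningCompletedAnnotationValue,
// which is how the test below marks its PV as fully provisioned.
func isProvisioningComplete(pv *api.PersistentVolume) bool {
	value, exists := pv.Annotations[pvProvisioningRequiredAnnotationKey]
	return exists && value == pvProvisioningCompletedAnnotationValue
}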
@@ -143,6 +143,76 @@ func TestClaimRace(t *testing.T) {
 	}
 }
 
+func TestClaimSyncAfterVolumeProvisioning(t *testing.T) {
+	// Tests that binder.syncVolume will also syncClaim if the PV has completed
+	// provisioning but the claim is still Pending. We want to advance to Bound
+	// without having to wait until the binder's next sync period.
+	claim := &api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: "bar",
+			Annotations: map[string]string{
+				qosProvisioningKey: "foo",
+			},
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
+				},
+			},
+		},
+		Status: api.PersistentVolumeClaimStatus{
+			Phase: api.ClaimPending,
+		},
+	}
+	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
+	claimRef, _ := api.GetReference(claim)
+
+	pv := &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			Name: "foo",
+			Annotations: map[string]string{
+				pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue,
+			},
+		},
+		Spec: api.PersistentVolumeSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Capacity: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				HostPath: &api.HostPathVolumeSource{
+					Path: "/tmp/data01",
+				},
+			},
+			ClaimRef: claimRef,
+		},
+		Status: api.PersistentVolumeStatus{
+			Phase: api.VolumePending,
+		},
+	}
+
+	volumeIndex := NewPersistentVolumeOrderedIndex()
+	mockClient := &mockBinderClient{
+		claim: claim,
+	}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+	// adds the volume to the index, making the volume available.
+	// pv also completed provisioning, so syncClaim should cause claim's phase to advance to Bound
+	syncVolume(volumeIndex, mockClient, pv)
+	if mockClient.volume.Status.Phase != api.VolumeAvailable {
+		t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
+	}
+	if mockClient.claim.Status.Phase != api.ClaimBound {
+		t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase)
+	}
+}
+
 func TestExampleObjects(t *testing.T) {
 	scenarios := map[string]struct {
 		expected interface{}
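The test constructs a mockBinderClient directly; its definition sits elsewhere in the binder test file and is not part of this diff. Below is a minimal sketch of the shape the test relies on, inferred only from the usage above (the claim and volume fields and the GetPersistentVolumeClaim call made by syncVolume). The real mock implements the rest of the binderClient interface as well, which is omitted here.

// Sketch only: inferred from how the test uses the mock, not the upstream definition.
type mockBinderClient struct {
	// volume is assumed to be populated by the mock's PV update methods when
	// syncVolume persists the new phase, which is why the test can assert on it.
	volume *api.PersistentVolume
	claim  *api.PersistentVolumeClaim
}

// GetPersistentVolumeClaim returns the stored claim; syncVolume calls this to resolve
// the PV's ClaimRef. The not-found error path is omitted in this sketch.
func (c *mockBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
	return c.claim, nil
}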