From 66b584484132b95fdcb0b80e5d7a64e5e91c8592 Mon Sep 17 00:00:00 2001
From: markturansky
Date: Thu, 17 Dec 2015 21:30:12 -0500
Subject: [PATCH] trigger syncClaim after pv provisioning to reduce wait

---
 ...ersistentvolume_claim_binder_controller.go | 10 ++-
 ...tentvolume_claim_binder_controller_test.go | 70 +++++++++++++++++++
 2 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go
index 9d438203342..d4e7f2f85af 100644
--- a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go
+++ b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go
@@ -225,7 +225,7 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 		nextPhase = api.VolumeAvailable
 
 		if volume.Spec.ClaimRef != nil {
-			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
+			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
 			if errors.IsNotFound(err) {
 				// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
 				// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
@@ -249,6 +249,14 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 			} else if err != nil {
 				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
 			}
+
+			// A dynamically provisioned claim remains Pending until its volume is completely provisioned.
+			// The provisioner updates the PV and triggers this sync for the volume. Explicitly syncing the
+			// claim here lets it advance to Bound now instead of waiting for the next sync period; otherwise,
+			// the maximum wait for the claim to become Bound is the default sync period.
+			if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) {
+				syncClaim(volumeIndex, binderClient, claim)
+			}
 		}
 
 		glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)
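A note on the new condition in syncVolume above: keyExists and isProvisioningComplete are existing helpers in this package and are not modified by this patch. The standalone sketch below illustrates the gating logic they are assumed to implement, inferred from the annotations the new test uses (qosProvisioningKey on the claim; pvProvisioningRequiredAnnotationKey set to pvProvisioningCompletedAnnotationValue on the PV). The constant values, function bodies, and main function are illustrative placeholders only; the real helpers operate on API objects rather than bare maps.

package main

import "fmt"

// Placeholder values standing in for the package's real annotation constants
// (qosProvisioningKey, pvProvisioningRequiredAnnotationKey,
// pvProvisioningCompletedAnnotationValue).
const (
	qosProvisioningKey                     = "example/qos-provisioning"
	pvProvisioningRequiredAnnotationKey    = "example/provisioning-required"
	pvProvisioningCompletedAnnotationValue = "example/provisioning-completed"
)

// keyExists is assumed to be a plain map-membership check on annotations.
func keyExists(key string, m map[string]string) bool {
	_, ok := m[key]
	return ok
}

// isProvisioningComplete is assumed to report whether the provisioner has
// flipped the PV's provisioning annotation to the "completed" value, matching
// the annotation the new test sets on its PersistentVolume.
func isProvisioningComplete(pvAnnotations map[string]string) bool {
	return pvAnnotations[pvProvisioningRequiredAnnotationKey] == pvProvisioningCompletedAnnotationValue
}

func main() {
	claimAnnotations := map[string]string{qosProvisioningKey: "foo"}
	pvAnnotations := map[string]string{pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue}

	// Mirrors the gate added in syncVolume: only a Pending, dynamically
	// provisioned claim whose volume has finished provisioning is synced early.
	if keyExists(qosProvisioningKey, claimAnnotations) && isProvisioningComplete(pvAnnotations) {
		fmt.Println("claim would be synced immediately instead of waiting for the next sync period")
	}
}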
diff --git a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go
index b0fcc135646..13c3d30b623 100644
--- a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go
+++ b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go
@@ -143,6 +143,76 @@ func TestClaimRace(t *testing.T) {
 	}
 }
 
+func TestClaimSyncAfterVolumeProvisioning(t *testing.T) {
+	// Tests that binder.syncVolume also syncs the claim when the PV has completed
+	// provisioning but the claim is still Pending, so the claim advances to Bound
+	// without waiting for the binder's next sync period.
+	claim := &api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: "bar",
+			Annotations: map[string]string{
+				qosProvisioningKey: "foo",
+			},
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
+				},
+			},
+		},
+		Status: api.PersistentVolumeClaimStatus{
+			Phase: api.ClaimPending,
+		},
+	}
+	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
+	claimRef, _ := api.GetReference(claim)
+
+	pv := &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			Name: "foo",
+			Annotations: map[string]string{
+				pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue,
+			},
+		},
+		Spec: api.PersistentVolumeSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Capacity: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				HostPath: &api.HostPathVolumeSource{
+					Path: "/tmp/data01",
+				},
+			},
+			ClaimRef: claimRef,
+		},
+		Status: api.PersistentVolumeStatus{
+			Phase: api.VolumePending,
+		},
+	}
+
+	volumeIndex := NewPersistentVolumeOrderedIndex()
+	mockClient := &mockBinderClient{
+		claim: claim,
+	}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+	// syncVolume adds the volume to the index, making the volume Available.
+	// The PV has also completed provisioning, so syncClaim should advance the claim's phase to Bound.
+	syncVolume(volumeIndex, mockClient, pv)
+	if mockClient.volume.Status.Phase != api.VolumeAvailable {
+		t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
+	}
+	if mockClient.claim.Status.Phase != api.ClaimBound {
+		t.Errorf("Expected phase %s but got %s", api.ClaimBound, mockClient.claim.Status.Phase)
+	}
+}
+
 func TestExampleObjects(t *testing.T) {
 	scenarios := map[string]struct {
 		expected interface{}