Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)

commit 66b5844841
parent a45c87864e

    trigger syncClaim after pv provisioning to reduce wait
@@ -225,7 +225,7 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 		nextPhase = api.VolumeAvailable
 
 		if volume.Spec.ClaimRef != nil {
-			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
+			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
 			if errors.IsNotFound(err) {
 				// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
 				// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
@@ -249,6 +249,14 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 			} else if err != nil {
 				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
 			}
+
+			// Dynamically provisioned claims remain Pending until its volume is completely provisioned.
+			// The provisioner updates the PV and triggers this update for the volume. Explicitly sync'ing
+			// the claim here prevents the need to wait until the next sync period when the claim would normally
+			// advance to Bound phase. Otherwise, the maximum wait time for the claim to be Bound is the default sync period.
+			if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) {
+				syncClaim(volumeIndex, binderClient, claim)
+			}
 		}
 		glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)
 
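For readers unfamiliar with the surrounding controller code: the new condition calls two small helpers, keyExists and isProvisioningComplete, that are defined elsewhere in the package and are not part of this diff. Below is a minimal sketch of plausible implementations, inferred only from how the helpers are used here and from the annotations set up in the test further down; the repository's actual code may differ.

// Sketch only, not the repository's actual implementation.
// keyExists is assumed to be a plain map-membership check.
func keyExists(key string, haystack map[string]string) bool {
	_, exists := haystack[key]
	return exists
}

// isProvisioningComplete is assumed to report whether the volume's provisioning
// annotation has been flipped to the "completed" value, which is how the test
// below marks its PV as fully provisioned.
func isProvisioningComplete(pv *api.PersistentVolume) bool {
	return pv.Annotations[pvProvisioningRequiredAnnotationKey] == pvProvisioningCompletedAnnotationValue
}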
@@ -143,6 +143,76 @@ func TestClaimRace(t *testing.T) {
 	}
 }
 
+func TestClaimSyncAfterVolumeProvisioning(t *testing.T) {
+	// Tests that binder.syncVolume will also syncClaim if the PV has completed
+	// provisioning but the claim is still Pending. We want to advance to Bound
+	// without having to wait until the binder's next sync period.
+	claim := &api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: "bar",
+			Annotations: map[string]string{
+				qosProvisioningKey: "foo",
+			},
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
+				},
+			},
+		},
+		Status: api.PersistentVolumeClaimStatus{
+			Phase: api.ClaimPending,
+		},
+	}
+	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
+	claimRef, _ := api.GetReference(claim)
+
+	pv := &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			Name: "foo",
+			Annotations: map[string]string{
+				pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue,
+			},
+		},
+		Spec: api.PersistentVolumeSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Capacity: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				HostPath: &api.HostPathVolumeSource{
+					Path: "/tmp/data01",
+				},
+			},
+			ClaimRef: claimRef,
+		},
+		Status: api.PersistentVolumeStatus{
+			Phase: api.VolumePending,
+		},
+	}
+
+	volumeIndex := NewPersistentVolumeOrderedIndex()
+	mockClient := &mockBinderClient{
+		claim: claim,
+	}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+	// adds the volume to the index, making the volume available.
+	// pv also completed provisioning, so syncClaim should cause claim's phase to advance to Bound
+	syncVolume(volumeIndex, mockClient, pv)
+	if mockClient.volume.Status.Phase != api.VolumeAvailable {
+		t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
+	}
+	if mockClient.claim.Status.Phase != api.ClaimBound {
+		t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase)
+	}
+}
+
 func TestExampleObjects(t *testing.T) {
 	scenarios := map[string]struct {
 		expected interface{}
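The assertions above only work because mockBinderClient hands the seeded claim back to syncVolume and records whatever objects the binder writes through it. A trimmed-down, hypothetical stand-in with the same observable behavior might look like the sketch below; apart from GetPersistentVolumeClaim, which appears in the diff above, the method and field names are assumptions, and the repository's real mock implements the full binderClient interface.

// Hypothetical sketch of the behavior the test relies on; not the repo's actual mock.
type mockBinderClientSketch struct {
	volume *api.PersistentVolume      // captures the last PV status update (read as mockClient.volume)
	claim  *api.PersistentVolumeClaim // seeded with the Pending claim, later the updated claim (read as mockClient.claim)
}

// syncVolume looks the claim up through the client; the test always seeds one,
// so the NotFound path is omitted from this sketch.
func (c *mockBinderClientSketch) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
	return c.claim, nil
}

// Recording the status updates is what lets the test assert on
// mockClient.volume.Status.Phase and mockClient.claim.Status.Phase afterwards.
func (c *mockBinderClientSketch) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	c.volume = volume
	return volume, nil
}

func (c *mockBinderClientSketch) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
	c.claim = claim
	return claim, nil
}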