From 6fa527a460551d617e80d3fd8350f3cc1ad7770b Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:02 +0200 Subject: [PATCH 01/34] Remove all three PersistentVolume controllers. We will add new ones gradually in smaller chunks. --- ...ersistentvolume_claim_binder_controller.go | 530 ------------- ...tentvolume_claim_binder_controller_test.go | 732 ------------------ ...persistentvolume_provisioner_controller.go | 536 ------------- ...stentvolume_provisioner_controller_test.go | 295 ------- .../persistentvolume_recycler_controller.go | 415 ---------- ...rsistentvolume_recycler_controller_test.go | 265 ------- 6 files changed, 2773 deletions(-) delete mode 100644 pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go delete mode 100644 pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go delete mode 100644 pkg/controller/persistentvolume/persistentvolume_provisioner_controller.go delete mode 100644 pkg/controller/persistentvolume/persistentvolume_provisioner_controller_test.go delete mode 100644 pkg/controller/persistentvolume/persistentvolume_recycler_controller.go delete mode 100644 pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go deleted file mode 100644 index a0e105a09c4..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller.go +++ /dev/null @@ -1,530 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/metrics" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" -) - -// PersistentVolumeClaimBinder is a controller that synchronizes PersistentVolumeClaims. 
-type PersistentVolumeClaimBinder struct { - volumeIndex *persistentVolumeOrderedIndex - volumeController *framework.Controller - claimController *framework.Controller - client binderClient - stopChannels map[string]chan struct{} - lock sync.RWMutex -} - -// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder -func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder { - if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("pv_claim_binder_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) - } - volumeIndex := NewPersistentVolumeOrderedIndex() - binderClient := NewBinderClient(kubeClient) - binder := &PersistentVolumeClaimBinder{ - volumeIndex: volumeIndex, - client: binderClient, - } - - _, volumeController := framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumes().Watch(options) - }, - }, - &api.PersistentVolume{}, - // TODO: Can we have much longer period here? - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: binder.addVolume, - UpdateFunc: binder.updateVolume, - DeleteFunc: binder.deleteVolume, - }, - ) - _, claimController := framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) - }, - }, - &api.PersistentVolumeClaim{}, - // TODO: Can we have much longer period here? 
- syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: binder.addClaim, - UpdateFunc: binder.updateClaim, - DeleteFunc: binder.deleteClaim, - }, - ) - - binder.claimController = claimController - binder.volumeController = volumeController - - return binder -} -func (binder *PersistentVolumeClaimBinder) addVolume(obj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - pv, ok := obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Expected PersistentVolume but handler received %+v", obj) - return - } - if err := syncVolume(binder.volumeIndex, binder.client, pv); err != nil { - glog.Errorf("PVClaimBinder could not add volume %s: %+v", pv.Name, err) - } -} - -func (binder *PersistentVolumeClaimBinder) updateVolume(oldObj, newObj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - newVolume, ok := newObj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) - return - } - if err := binder.volumeIndex.Update(newVolume); err != nil { - glog.Errorf("Error updating volume %s in index: %v", newVolume.Name, err) - return - } - if err := syncVolume(binder.volumeIndex, binder.client, newVolume); err != nil { - glog.Errorf("PVClaimBinder could not update volume %s: %+v", newVolume.Name, err) - } -} - -func (binder *PersistentVolumeClaimBinder) deleteVolume(obj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - volume, ok := obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Expected PersistentVolume but handler received %+v", obj) - return - } - if err := binder.volumeIndex.Delete(volume); err != nil { - glog.Errorf("Error deleting volume %s from index: %v", volume.Name, err) - } -} - -func (binder *PersistentVolumeClaimBinder) addClaim(obj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - claim, ok := obj.(*api.PersistentVolumeClaim) - if !ok { - glog.Errorf("Expected PersistentVolumeClaim but handler received %+v", obj) - return - } - if err := syncClaim(binder.volumeIndex, binder.client, claim); err != nil { - glog.Errorf("PVClaimBinder could not add claim %s: %+v", claim.Name, err) - } -} - -func (binder *PersistentVolumeClaimBinder) updateClaim(oldObj, newObj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - newClaim, ok := newObj.(*api.PersistentVolumeClaim) - if !ok { - glog.Errorf("Expected PersistentVolumeClaim but handler received %+v", newObj) - return - } - if err := syncClaim(binder.volumeIndex, binder.client, newClaim); err != nil { - glog.Errorf("PVClaimBinder could not update claim %s: %+v", newClaim.Name, err) - } -} - -func (binder *PersistentVolumeClaimBinder) deleteClaim(obj interface{}) { - binder.lock.Lock() - defer binder.lock.Unlock() - var volume *api.PersistentVolume - if pvc, ok := obj.(*api.PersistentVolumeClaim); ok { - if pvObj, exists, _ := binder.volumeIndex.GetByKey(pvc.Spec.VolumeName); exists { - if pv, ok := pvObj.(*api.PersistentVolume); ok { - volume = pv - } - } - } - if unk, ok := obj.(cache.DeletedFinalStateUnknown); ok && unk.Obj != nil { - if pv, ok := unk.Obj.(*api.PersistentVolume); ok { - volume = pv - } - } - - // sync the volume when its claim is deleted. Explicitly sync'ing the volume here in response to - // claim deletion prevents the volume from waiting until the next sync period for its Release. 
- if volume != nil { - err := syncVolume(binder.volumeIndex, binder.client, volume) - if err != nil { - glog.Errorf("PVClaimBinder could not update volume %s from deleteClaim handler: %+v", volume.Name, err) - } - } -} - -func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) { - glog.V(5).Infof("Synchronizing PersistentVolume[%s], current phase: %s\n", volume.Name, volume.Status.Phase) - - // The PV may have been modified by parallel call to syncVolume, load - // the current version. - newPv, err := binderClient.GetPersistentVolume(volume.Name) - if err != nil { - return fmt.Errorf("Cannot reload volume %s: %v", volume.Name, err) - } - volume = newPv - - // volumes can be in one of the following states: - // - // VolumePending -- default value -- not bound to a claim and not yet processed through this controller. - // VolumeAvailable -- not bound to a claim, but processed at least once and found in this controller's volumeIndex. - // VolumeBound -- bound to a claim because volume.Spec.ClaimRef != nil. Claim status may not be correct. - // VolumeReleased -- volume.Spec.ClaimRef != nil but the claim has been deleted by the user. - // VolumeFailed -- volume.Spec.ClaimRef != nil and the volume failed processing in the recycler - currentPhase := volume.Status.Phase - nextPhase := currentPhase - - // Always store the newest volume state in local cache. - _, exists, err := volumeIndex.Get(volume) - if err != nil { - return err - } - if !exists { - volumeIndex.Add(volume) - } else { - volumeIndex.Update(volume) - } - - if isBeingProvisioned(volume) { - glog.V(4).Infof("Skipping PersistentVolume[%s], waiting for provisioning to finish", volume.Name) - return nil - } - - switch currentPhase { - case api.VolumePending: - - // 4 possible states: - // 1. ClaimRef != nil, Claim exists, Claim UID == ClaimRef UID: Prebound to claim. Make volume available for binding (it will match PVC). - // 2. ClaimRef != nil, Claim exists, Claim UID != ClaimRef UID: Recently recycled. Remove bind. Make volume available for new claim. - // 3. ClaimRef != nil, Claim !exists: Recently recycled. Remove bind. Make volume available for new claim. - // 4. ClaimRef == nil: Neither recycled nor prebound. Make volume available for binding. - nextPhase = api.VolumeAvailable - - if volume.Spec.ClaimRef != nil { - claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) - switch { - case err != nil && !errors.IsNotFound(err): - return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err) - case errors.IsNotFound(err) || (claim != nil && claim.UID != volume.Spec.ClaimRef.UID): - glog.V(5).Infof("PersistentVolume[%s] has a claim ref to a claim which does not exist", volume.Name) - if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { - // Pending volumes that have a ClaimRef where the claim is missing were recently recycled. - // The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle. 
- // removing ClaimRef unbinds the volume - clone, err := conversion.NewCloner().DeepCopy(volume) - if err != nil { - return fmt.Errorf("Error cloning pv: %v", err) - } - volumeClone, ok := clone.(*api.PersistentVolume) - if !ok { - return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone) - } - glog.V(5).Infof("PersistentVolume[%s] is recently recycled; remove claimRef.", volume.Name) - volumeClone.Spec.ClaimRef = nil - - if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil { - return fmt.Errorf("Unexpected error saving PersistentVolume: %+v", err) - } else { - volume = updatedVolume - volumeIndex.Update(volume) - } - } else { - // Pending volumes that has a ClaimRef and the claim is missing and is was not recycled. - // It must have been freshly provisioned and the claim was deleted during the provisioning. - // Mark the volume as Released, it will be deleted. - nextPhase = api.VolumeReleased - } - } - - // Dynamically provisioned claims remain Pending until its volume is completely provisioned. - // The provisioner updates the PV and triggers this update for the volume. Explicitly sync'ing - // the claim here prevents the need to wait until the next sync period when the claim would normally - // advance to Bound phase. Otherwise, the maximum wait time for the claim to be Bound is the default sync period. - if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) { - syncClaim(volumeIndex, binderClient, claim) - } - } - glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name) - - // available volumes await a claim - case api.VolumeAvailable: - if volume.Spec.ClaimRef != nil { - _, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) - if err == nil { - // change of phase will trigger an update event with the newly bound volume - glog.V(5).Infof("PersistentVolume[%s] is now bound\n", volume.Name) - nextPhase = api.VolumeBound - } else { - if errors.IsNotFound(err) { - nextPhase = api.VolumeReleased - } - } - } - - //bound volumes require verification of their bound claims - case api.VolumeBound: - if volume.Spec.ClaimRef == nil { - return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume) - } else { - claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) - - // A volume is Released when its bound claim cannot be found in the API server. - // A claim by the same name can be found if deleted and recreated before this controller can release - // the volume from the original claim, so a UID check is necessary. - if err != nil { - if errors.IsNotFound(err) { - nextPhase = api.VolumeReleased - } else { - return err - } - } else if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { - nextPhase = api.VolumeReleased - } - } - - // released volumes require recycling - case api.VolumeReleased: - if volume.Spec.ClaimRef == nil { - return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume) - } else { - // another process is watching for released volumes. - // PersistentVolumeReclaimPolicy is set per PersistentVolume - // Recycle - sets the PV to Pending and back under this controller's management - // Delete - delete events are handled by this controller's watch. PVs are removed from the index. 
- } - - // volumes are removed by processes external to this binder and must be removed from the cluster - case api.VolumeFailed: - if volume.Spec.ClaimRef == nil { - return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume) - } else { - glog.V(5).Infof("PersistentVolume[%s] previously failed recycling. Skipping.\n", volume.Name) - } - } - - if currentPhase != nextPhase { - volume.Status.Phase = nextPhase - - // a change in state will trigger another update through this controller. - // each pass through this controller evaluates current phase and decides whether or not to change to the next phase - glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", volume.Name, currentPhase, nextPhase) - volume, err := binderClient.UpdatePersistentVolumeStatus(volume) - if err != nil { - // Rollback to previous phase - volume.Status.Phase = currentPhase - } - volumeIndex.Update(volume) - } - - return nil -} - -func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, claim *api.PersistentVolumeClaim) (err error) { - glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s] for binding", claim.Name) - - // The claim may have been modified by parallel call to syncClaim, load - // the current version. - newClaim, err := binderClient.GetPersistentVolumeClaim(claim.Namespace, claim.Name) - if err != nil { - return fmt.Errorf("Cannot reload claim %s/%s: %v", claim.Namespace, claim.Name, err) - } - claim = newClaim - - switch claim.Status.Phase { - case api.ClaimPending: - // claims w/ a storage-class annotation for provisioning with *only* match volumes with a ClaimRef of the claim. - volume, err := volumeIndex.findBestMatchForClaim(claim) - if err != nil { - return err - } - - if volume == nil { - glog.V(5).Infof("A volume match does not exist for persistent claim: %s", claim.Name) - return nil - } - - if isBeingProvisioned(volume) { - glog.V(5).Infof("PersistentVolume[%s] for PersistentVolumeClaim[%s/%s] is still being provisioned.", volume.Name, claim.Namespace, claim.Name) - return nil - } - - claimRef, err := api.GetReference(claim) - if err != nil { - return fmt.Errorf("Unexpected error getting claim reference: %v\n", err) - } - - // Make a binding reference to the claim by persisting claimRef on the volume. - // The local cache must be updated with the new bind to prevent subsequent - // claims from binding to the volume. - if volume.Spec.ClaimRef == nil { - clone, err := conversion.NewCloner().DeepCopy(volume) - if err != nil { - return fmt.Errorf("Error cloning pv: %v", err) - } - volumeClone, ok := clone.(*api.PersistentVolume) - if !ok { - return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone) - } - volumeClone.Spec.ClaimRef = claimRef - if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil { - return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err) - } else { - volume = updatedVolume - volumeIndex.Update(updatedVolume) - } - } - - // the bind is persisted on the volume above and will always match the claim in a search. - // claim would remain Pending if the update fails, so processing this state is idempotent. - // this only needs to be processed once. 
- if claim.Spec.VolumeName != volume.Name { - claim.Spec.VolumeName = volume.Name - claim, err = binderClient.UpdatePersistentVolumeClaim(claim) - if err != nil { - return fmt.Errorf("Error updating claim with VolumeName %s: %+v\n", volume.Name, err) - } - } - - claim.Status.Phase = api.ClaimBound - claim.Status.AccessModes = volume.Spec.AccessModes - claim.Status.Capacity = volume.Spec.Capacity - _, err = binderClient.UpdatePersistentVolumeClaimStatus(claim) - if err != nil { - return fmt.Errorf("Unexpected error saving claim status: %+v", err) - } - - case api.ClaimBound: - // no-op. Claim is bound, values from PV are set. PVCs are technically mutable in the API server - // and we don't want to handle those changes at this time. - - default: - return fmt.Errorf("Unknown state for PVC: %#v", claim) - - } - - glog.V(5).Infof("PersistentVolumeClaim[%s] is bound\n", claim.Name) - return nil -} - -func isBeingProvisioned(volume *api.PersistentVolume) bool { - value, found := volume.Annotations[pvProvisioningRequiredAnnotationKey] - if found && value != pvProvisioningCompletedAnnotationValue { - return true - } - return false -} - -// Run starts all of this binder's control loops -func (controller *PersistentVolumeClaimBinder) Run() { - glog.V(5).Infof("Starting PersistentVolumeClaimBinder\n") - if controller.stopChannels == nil { - controller.stopChannels = make(map[string]chan struct{}) - } - - if _, exists := controller.stopChannels["volumes"]; !exists { - controller.stopChannels["volumes"] = make(chan struct{}) - go controller.volumeController.Run(controller.stopChannels["volumes"]) - } - - if _, exists := controller.stopChannels["claims"]; !exists { - controller.stopChannels["claims"] = make(chan struct{}) - go controller.claimController.Run(controller.stopChannels["claims"]) - } -} - -// Stop gracefully shuts down this binder -func (controller *PersistentVolumeClaimBinder) Stop() { - glog.V(5).Infof("Stopping PersistentVolumeClaimBinder\n") - for name, stopChan := range controller.stopChannels { - close(stopChan) - delete(controller.stopChannels, name) - } -} - -// binderClient abstracts access to PVs and PVCs -type binderClient interface { - GetPersistentVolume(name string) (*api.PersistentVolume, error) - UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) - DeletePersistentVolume(volume *api.PersistentVolume) error - UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) - GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) - UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) -} - -func NewBinderClient(c clientset.Interface) binderClient { - return &realBinderClient{c} -} - -type realBinderClient struct { - client clientset.Interface -} - -func (c *realBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Get(name) -} - -func (c *realBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Update(volume) -} - -func (c *realBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error { - return c.client.Core().PersistentVolumes().Delete(volume.Name, nil) -} - -func (c *realBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, 
error) { - return c.client.Core().PersistentVolumes().UpdateStatus(volume) -} - -func (c *realBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(namespace).Get(name) -} - -func (c *realBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim) -} - -func (c *realBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim) -} diff --git a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go deleted file mode 100644 index f01908c7f7f..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_claim_binder_controller_test.go +++ /dev/null @@ -1,732 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "os" - "reflect" - "testing" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/client/testing/core" - "k8s.io/kubernetes/pkg/types" - utiltesting "k8s.io/kubernetes/pkg/util/testing" - "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/host_path" - volumetest "k8s.io/kubernetes/pkg/volume/testing" -) - -func TestRunStop(t *testing.T) { - clientset := fake.NewSimpleClientset() - binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second) - - if len(binder.stopChannels) != 0 { - t.Errorf("Non-running binder should not have any stopChannels. Got %v", len(binder.stopChannels)) - } - - binder.Run() - - if len(binder.stopChannels) != 2 { - t.Errorf("Running binder should have exactly 2 stopChannels. Got %v", len(binder.stopChannels)) - } - - binder.Stop() - - if len(binder.stopChannels) != 0 { - t.Errorf("Non-running binder should not have any stopChannels. 
Got %v", len(binder.stopChannels)) - } -} - -func TestClaimRace(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("claimbinder-test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - c1 := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ - Name: "c1", - }, - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"), - }, - }, - }, - Status: api.PersistentVolumeClaimStatus{ - Phase: api.ClaimPending, - }, - } - c1.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - - c2 := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ - Name: "c2", - }, - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"), - }, - }, - }, - Status: api.PersistentVolumeClaimStatus{ - Phase: api.ClaimPending, - }, - } - c2.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - - v := &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - }, - Spec: api.PersistentVolumeSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: fmt.Sprintf("%s/data01", tmpDir), - }, - }, - }, - Status: api.PersistentVolumeStatus{ - Phase: api.VolumePending, - }, - } - - volumeIndex := NewPersistentVolumeOrderedIndex() - mockClient := &mockBinderClient{} - mockClient.volume = v - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - // adds the volume to the index, making the volume available - syncVolume(volumeIndex, mockClient, v) - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - if _, exists, _ := volumeIndex.Get(v); !exists { - t.Errorf("Expected to find volume in index but it did not exist") - } - - // add the claim to fake API server - mockClient.UpdatePersistentVolumeClaim(c1) - // an initial sync for a claim matches the volume - err = syncClaim(volumeIndex, mockClient, c1) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if c1.Status.Phase != api.ClaimBound { - t.Errorf("Expected phase %s but got %s", api.ClaimBound, c1.Status.Phase) - } - - // before the volume gets updated w/ claimRef, a 2nd claim can attempt to bind and find the same volume - // add the 2nd claim to fake API server - mockClient.UpdatePersistentVolumeClaim(c2) - err = syncClaim(volumeIndex, mockClient, c2) - if err != nil { - t.Errorf("unexpected error for unmatched claim: %v", err) - } - if c2.Status.Phase != api.ClaimPending { - t.Errorf("Expected phase %s but got %s", api.ClaimPending, c2.Status.Phase) - } -} - -func TestNewClaimWithSameNameAsOldClaim(t *testing.T) { - c1 := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ - Name: "c1", - Namespace: "foo", - UID: "12345", - }, - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: 
api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"), - }, - }, - }, - Status: api.PersistentVolumeClaimStatus{ - Phase: api.ClaimBound, - }, - } - c1.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - - v := &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - }, - Spec: api.PersistentVolumeSpec{ - ClaimRef: &api.ObjectReference{ - Name: c1.Name, - Namespace: c1.Namespace, - UID: "45678", - }, - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: "/tmp/data01", - }, - }, - }, - Status: api.PersistentVolumeStatus{ - Phase: api.VolumeBound, - }, - } - - volumeIndex := NewPersistentVolumeOrderedIndex() - mockClient := &mockBinderClient{ - claim: c1, - volume: v, - } - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)) - - syncVolume(volumeIndex, mockClient, v) - if mockClient.volume.Status.Phase != api.VolumeReleased { - t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase) - } - -} - -func TestClaimSyncAfterVolumeProvisioning(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("claimbinder-test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - // Tests that binder.syncVolume will also syncClaim if the PV has completed - // provisioning but the claim is still Pending. We want to advance to Bound - // without having to wait until the binder's next sync period. - claim := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: "bar", - Annotations: map[string]string{ - qosProvisioningKey: "foo", - }, - }, - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"), - }, - }, - }, - Status: api.PersistentVolumeClaimStatus{ - Phase: api.ClaimPending, - }, - } - claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - claimRef, _ := api.GetReference(claim) - - pv := &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Annotations: map[string]string{ - pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue, - }, - }, - Spec: api.PersistentVolumeSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: fmt.Sprintf("%s/data01", tmpDir), - }, - }, - ClaimRef: claimRef, - }, - Status: api.PersistentVolumeStatus{ - Phase: api.VolumePending, - }, - } - - volumeIndex := NewPersistentVolumeOrderedIndex() - mockClient := &mockBinderClient{ - claim: claim, - volume: pv, - } - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - // adds the volume to the index, making the volume available. 
- // pv also completed provisioning, so syncClaim should cause claim's phase to advance to Bound - syncVolume(volumeIndex, mockClient, pv) - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - if mockClient.claim.Status.Phase != api.ClaimBound { - t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase) - } -} - -func TestExampleObjects(t *testing.T) { - scenarios := map[string]struct { - expected interface{} - }{ - "claims/claim-01.yaml": { - expected: &api.PersistentVolumeClaim{ - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"), - }, - }, - }, - }, - }, - "claims/claim-02.yaml": { - expected: &api.PersistentVolumeClaim{ - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"), - }, - }, - }, - }, - }, - "volumes/local-01.yaml": { - expected: &api.PersistentVolume{ - Spec: api.PersistentVolumeSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: "/somepath/data01", - }, - }, - }, - }, - }, - "volumes/local-02.yaml": { - expected: &api.PersistentVolume{ - Spec: api.PersistentVolumeSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: "/somepath/data02", - }, - }, - PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle, - }, - }, - }, - } - - for name, scenario := range scenarios { - codec := api.Codecs.UniversalDecoder() - o := core.NewObjects(api.Scheme, codec) - if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/"+name, o, codec); err != nil { - t.Fatal(err) - } - - clientset := &fake.Clientset{} - clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) - - if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolumeClaim{}) { - pvc, err := clientset.Core().PersistentVolumeClaims("ns").Get("doesntmatter") - if err != nil { - t.Fatalf("Error retrieving object: %v", err) - } - - expected := scenario.expected.(*api.PersistentVolumeClaim) - if pvc.Spec.AccessModes[0] != expected.Spec.AccessModes[0] { - t.Errorf("Unexpected mismatch. Got %v wanted %v", pvc.Spec.AccessModes[0], expected.Spec.AccessModes[0]) - } - - aQty := pvc.Spec.Resources.Requests[api.ResourceStorage] - bQty := expected.Spec.Resources.Requests[api.ResourceStorage] - aSize := aQty.Value() - bSize := bQty.Value() - - if aSize != bSize { - t.Errorf("Unexpected mismatch. 
Got %v wanted %v", aSize, bSize) - } - } - - if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolume{}) { - pv, err := clientset.Core().PersistentVolumes().Get("doesntmatter") - if err != nil { - t.Fatalf("Error retrieving object: %v", err) - } - - expected := scenario.expected.(*api.PersistentVolume) - if pv.Spec.AccessModes[0] != expected.Spec.AccessModes[0] { - t.Errorf("Unexpected mismatch. Got %v wanted %v", pv.Spec.AccessModes[0], expected.Spec.AccessModes[0]) - } - - aQty := pv.Spec.Capacity[api.ResourceStorage] - bQty := expected.Spec.Capacity[api.ResourceStorage] - aSize := aQty.Value() - bSize := bQty.Value() - - if aSize != bSize { - t.Errorf("Unexpected mismatch. Got %v wanted %v", aSize, bSize) - } - - if pv.Spec.HostPath.Path != expected.Spec.HostPath.Path { - t.Errorf("Unexpected mismatch. Got %v wanted %v", pv.Spec.HostPath.Path, expected.Spec.HostPath.Path) - } - } - } -} - -func TestBindingWithExamples(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("claimbinder-test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - codec := api.Codecs.UniversalDecoder() - o := core.NewObjects(api.Scheme, codec) - if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil { - t.Fatal(err) - } - if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil { - t.Fatal(err) - } - - clientset := &fake.Clientset{} - clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) - - pv, err := clientset.Core().PersistentVolumes().Get("any") - if err != nil { - t.Errorf("Unexpected error getting PV from client: %v", err) - } - pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle - if err != nil { - t.Errorf("Unexpected error getting PV from client: %v", err) - } - pv.ObjectMeta.SelfLink = testapi.Default.SelfLink("pv", "") - - // the default value of the PV is Pending. if processed at least once, its status in etcd is Available. - // There was a bug where only Pending volumes were being indexed and made ready for claims. 
- // Test that !Pending gets correctly added - pv.Status.Phase = api.VolumeAvailable - - claim, error := clientset.Core().PersistentVolumeClaims("ns").Get("any") - if error != nil { - t.Errorf("Unexpected error getting PVC from client: %v", err) - } - claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - - volumeIndex := NewPersistentVolumeOrderedIndex() - mockClient := &mockBinderClient{ - volume: pv, - claim: claim, - } - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - recycler := &PersistentVolumeRecycler{ - kubeClient: clientset, - client: mockClient, - pluginMgr: plugMgr, - } - - // adds the volume to the index, making the volume available - syncVolume(volumeIndex, mockClient, pv) - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - - // add the claim to fake API server - mockClient.UpdatePersistentVolumeClaim(claim) - // an initial sync for a claim will bind it to an unbound volume - syncClaim(volumeIndex, mockClient, claim) - - // bind expected on pv.Spec but status update hasn't happened yet - if mockClient.volume.Spec.ClaimRef == nil { - t.Errorf("Expected ClaimRef but got nil for pv.Status.ClaimRef\n") - } - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - if mockClient.claim.Spec.VolumeName != pv.Name { - t.Errorf("Expected claim.Spec.VolumeName %s but got %s", mockClient.claim.Spec.VolumeName, pv.Name) - } - if mockClient.claim.Status.Phase != api.ClaimBound { - t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase) - } - - // state changes in pvc triggers sync that sets pv attributes to pvc.Status - syncClaim(volumeIndex, mockClient, claim) - if len(mockClient.claim.Status.AccessModes) == 0 { - t.Errorf("Expected %d access modes but got 0", len(pv.Spec.AccessModes)) - } - - // persisting the bind to pv.Spec.ClaimRef triggers a sync - syncVolume(volumeIndex, mockClient, mockClient.volume) - if mockClient.volume.Status.Phase != api.VolumeBound { - t.Errorf("Expected phase %s but got %s", api.VolumeBound, mockClient.volume.Status.Phase) - } - - // pretend the user deleted their claim. periodic resync picks it up. 
- mockClient.claim = nil - syncVolume(volumeIndex, mockClient, mockClient.volume) - - if mockClient.volume.Status.Phase != api.VolumeReleased { - t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase) - } - - // released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing - err = recycler.reclaimVolume(mockClient.volume) - if err != nil { - t.Errorf("Unexpected error reclaiming volume: %+v", err) - } - if mockClient.volume.Status.Phase != api.VolumePending { - t.Errorf("Expected phase %s but got %s", api.VolumePending, mockClient.volume.Status.Phase) - } - - // after the recycling changes the phase to Pending, the binder picks up again - // to remove any vestiges of binding and make the volume Available again - syncVolume(volumeIndex, mockClient, mockClient.volume) - - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - if mockClient.volume.Spec.ClaimRef != nil { - t.Errorf("Expected nil ClaimRef: %+v", mockClient.volume.Spec.ClaimRef) - } -} - -func TestCasting(t *testing.T) { - clientset := fake.NewSimpleClientset() - binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second) - - pv := &api.PersistentVolume{} - unk := cache.DeletedFinalStateUnknown{} - pvc := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{Name: "foo"}, - Status: api.PersistentVolumeClaimStatus{Phase: api.ClaimBound}, - } - - // Inject mockClient into the binder. This prevents weird errors on stderr - // as the binder wants to load PV/PVC from API server. - mockClient := &mockBinderClient{ - volume: pv, - claim: pvc, - } - binder.client = mockClient - - // none of these should fail casting. - // the real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler - binder.addVolume(pv) - binder.updateVolume(pv, pv) - binder.deleteVolume(pv) - binder.deleteVolume(unk) - binder.addClaim(pvc) - binder.updateClaim(pvc, pvc) -} - -func TestRecycledPersistentVolumeUID(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("claimbinder-test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - codec := api.Codecs.UniversalDecoder() - o := core.NewObjects(api.Scheme, codec) - if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil { - t.Fatal(err) - } - if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil { - t.Fatal(err) - } - - clientset := &fake.Clientset{} - clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) - - pv, err := clientset.Core().PersistentVolumes().Get("any") - if err != nil { - t.Errorf("Unexpected error getting PV from client: %v", err) - } - pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle - if err != nil { - t.Errorf("Unexpected error getting PV from client: %v", err) - } - pv.ObjectMeta.SelfLink = testapi.Default.SelfLink("pv", "") - - // the default value of the PV is Pending. if processed at least once, its status in etcd is Available. - // There was a bug where only Pending volumes were being indexed and made ready for claims. 
- // Test that !Pending gets correctly added - pv.Status.Phase = api.VolumeAvailable - - claim, error := clientset.Core().PersistentVolumeClaims("ns").Get("any") - if error != nil { - t.Errorf("Unexpected error getting PVC from client: %v", err) - } - claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "") - claim.ObjectMeta.UID = types.UID("uid1") - - volumeIndex := NewPersistentVolumeOrderedIndex() - mockClient := &mockBinderClient{ - volume: pv, - claim: claim, - } - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - recycler := &PersistentVolumeRecycler{ - kubeClient: clientset, - client: mockClient, - pluginMgr: plugMgr, - } - - // adds the volume to the index, making the volume available - syncVolume(volumeIndex, mockClient, pv) - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - - // add the claim to fake API server - mockClient.UpdatePersistentVolumeClaim(claim) - // an initial sync for a claim will bind it to an unbound volume - syncClaim(volumeIndex, mockClient, claim) - - // pretend the user deleted their claim. periodic resync picks it up. - mockClient.claim = nil - syncVolume(volumeIndex, mockClient, mockClient.volume) - - if mockClient.volume.Status.Phase != api.VolumeReleased { - t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase) - } - - // released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing - err = recycler.reclaimVolume(mockClient.volume) - if err != nil { - t.Errorf("Unexpected error reclaiming volume: %+v", err) - } - if mockClient.volume.Status.Phase != api.VolumePending { - t.Errorf("Expected phase %s but got %s", api.VolumePending, mockClient.volume.Status.Phase) - } - - // after the recycling changes the phase to Pending, the binder picks up again - // to remove any vestiges of binding and make the volume Available again - // - // explicitly set the claim's UID to a different value to ensure that a new claim with the same - // name as what the PV was previously bound still yields an available volume - claim.ObjectMeta.UID = types.UID("uid2") - mockClient.claim = claim - syncVolume(volumeIndex, mockClient, mockClient.volume) - - if mockClient.volume.Status.Phase != api.VolumeAvailable { - t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase) - } - if mockClient.volume.Spec.ClaimRef != nil { - t.Errorf("Expected nil ClaimRef: %+v", mockClient.volume.Spec.ClaimRef) - } -} - -type mockBinderClient struct { - volume *api.PersistentVolume - claim *api.PersistentVolumeClaim -} - -func (c *mockBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) { - return c.volume, nil -} - -func (c *mockBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - c.volume = volume - return c.volume, nil -} - -func (c *mockBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error { - c.volume = nil - return nil -} - -func (c *mockBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - c.volume = volume - return c.volume, nil -} - -func (c *mockBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) { - if c.claim != nil { - return 
c.claim, nil - } else { - return nil, errors.NewNotFound(api.Resource("persistentvolumes"), name) - } -} - -func (c *mockBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - c.claim = claim - return c.claim, nil -} - -func (c *mockBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - c.claim = claim - return c.claim, nil -} - -func newMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) { - return &mockRecycler{ - path: spec.PersistentVolume.Spec.HostPath.Path, - }, nil -} - -type mockRecycler struct { - path string - host volume.VolumeHost - volume.MetricsNil -} - -func (r *mockRecycler) GetPath() string { - return r.path -} - -func (r *mockRecycler) Recycle() error { - // return nil means recycle passed - return nil -} diff --git a/pkg/controller/persistentvolume/persistentvolume_provisioner_controller.go b/pkg/controller/persistentvolume/persistentvolume_provisioner_controller.go deleted file mode 100644 index fdb7804a3ea..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_provisioner_controller.go +++ /dev/null @@ -1,536 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/io" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" -) - -// PersistentVolumeProvisionerController reconciles the state of all PersistentVolumes and PersistentVolumeClaims. -type PersistentVolumeProvisionerController struct { - volumeController *framework.Controller - volumeStore cache.Store - claimController *framework.Controller - claimStore cache.Store - client controllerClient - cloud cloudprovider.Interface - provisioner volume.ProvisionableVolumePlugin - pluginMgr volume.VolumePluginMgr - stopChannels map[string]chan struct{} - mutex sync.RWMutex - clusterName string -} - -// constant name values for the controllers stopChannels map. 
-// the controller uses these for graceful shutdown -const volumesStopChannel = "volumes" -const claimsStopChannel = "claims" - -// NewPersistentVolumeProvisionerController creates a new PersistentVolumeProvisionerController -func NewPersistentVolumeProvisionerController(client controllerClient, syncPeriod time.Duration, clusterName string, plugins []volume.VolumePlugin, provisioner volume.ProvisionableVolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeProvisionerController, error) { - controller := &PersistentVolumeProvisionerController{ - client: client, - cloud: cloud, - provisioner: provisioner, - clusterName: clusterName, - } - - if err := controller.pluginMgr.InitPlugins(plugins, controller); err != nil { - return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolumeProvisionerController: %+v", err) - } - - glog.V(5).Infof("Initializing provisioner: %s", controller.provisioner.Name()) - controller.provisioner.Init(controller) - - controller.volumeStore, controller.volumeController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.ListPersistentVolumes(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.WatchPersistentVolumes(options) - }, - }, - &api.PersistentVolume{}, - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: controller.handleAddVolume, - UpdateFunc: controller.handleUpdateVolume, - // delete handler not needed in this controller. - // volume deletion is handled by the recycler controller - }, - ) - controller.claimStore, controller.claimController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.ListPersistentVolumeClaims(api.NamespaceAll, options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.WatchPersistentVolumeClaims(api.NamespaceAll, options) - }, - }, - &api.PersistentVolumeClaim{}, - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: controller.handleAddClaim, - UpdateFunc: controller.handleUpdateClaim, - // delete handler not needed. - // normal recycling applies when a claim is deleted. - // recycling is handled by the binding controller. - }, - ) - - return controller, nil -} - -func (controller *PersistentVolumeProvisionerController) handleAddVolume(obj interface{}) { - controller.mutex.Lock() - defer controller.mutex.Unlock() - cachedPv, _, _ := controller.volumeStore.Get(obj) - if pv, ok := cachedPv.(*api.PersistentVolume); ok { - err := controller.reconcileVolume(pv) - if err != nil { - glog.Errorf("Error reconciling volume %s: %+v", pv.Name, err) - } - } -} - -func (controller *PersistentVolumeProvisionerController) handleUpdateVolume(oldObj, newObj interface{}) { - // The flow for Update is the same as Add. - // A volume is only provisioned if not done so already. 
- controller.handleAddVolume(newObj) -} - -func (controller *PersistentVolumeProvisionerController) handleAddClaim(obj interface{}) { - controller.mutex.Lock() - defer controller.mutex.Unlock() - cachedPvc, exists, _ := controller.claimStore.Get(obj) - if !exists { - glog.Errorf("PersistentVolumeClaim does not exist in the local cache: %+v", obj) - return - } - if pvc, ok := cachedPvc.(*api.PersistentVolumeClaim); ok { - err := controller.reconcileClaim(pvc) - if err != nil { - glog.Errorf("Error encoutered reconciling claim %s: %+v", pvc.Name, err) - } - } -} - -func (controller *PersistentVolumeProvisionerController) handleUpdateClaim(oldObj, newObj interface{}) { - // The flow for Update is the same as Add. - // A volume is only provisioned for a claim if not done so already. - controller.handleAddClaim(newObj) -} - -func (controller *PersistentVolumeProvisionerController) reconcileClaim(claim *api.PersistentVolumeClaim) error { - glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s] for dynamic provisioning", claim.Name) - - // The claim may have been modified by parallel call to reconcileClaim, load - // the current version. - newClaim, err := controller.client.GetPersistentVolumeClaim(claim.Namespace, claim.Name) - if err != nil { - return fmt.Errorf("Cannot reload claim %s/%s: %v", claim.Namespace, claim.Name, err) - } - claim = newClaim - err = controller.claimStore.Update(claim) - if err != nil { - return fmt.Errorf("Cannot update claim %s/%s: %v", claim.Namespace, claim.Name, err) - } - - if controller.provisioner == nil { - return fmt.Errorf("No provisioner configured for controller") - } - - // no provisioning requested, return Pending. Claim may be pending indefinitely without a match. - if !keyExists(qosProvisioningKey, claim.Annotations) { - glog.V(5).Infof("PersistentVolumeClaim[%s] no provisioning required", claim.Name) - return nil - } - if len(claim.Spec.VolumeName) != 0 { - glog.V(5).Infof("PersistentVolumeClaim[%s] already bound. No provisioning required", claim.Name) - return nil - } - if isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, claim.Annotations) { - glog.V(5).Infof("PersistentVolumeClaim[%s] is already provisioned.", claim.Name) - return nil - } - - glog.V(5).Infof("PersistentVolumeClaim[%s] provisioning", claim.Name) - provisioner, err := controller.newProvisioner(controller.provisioner, claim, nil) - if err != nil { - return fmt.Errorf("Unexpected error getting new provisioner for claim %s: %v\n", claim.Name, err) - } - newVolume, err := provisioner.NewPersistentVolumeTemplate() - if err != nil { - return fmt.Errorf("Unexpected error getting new volume template for claim %s: %v\n", claim.Name, err) - } - - claimRef, err := api.GetReference(claim) - if err != nil { - return fmt.Errorf("Unexpected error getting claim reference for %s: %v\n", claim.Name, err) - } - - storageClass, _ := claim.Annotations[qosProvisioningKey] - - // the creation of this volume is the bind to the claim. 
- // The claim will match the volume during the next sync period when the volume is in the local cache - newVolume.Spec.ClaimRef = claimRef - newVolume.Annotations[pvProvisioningRequiredAnnotationKey] = "true" - newVolume.Annotations[qosProvisioningKey] = storageClass - newVolume, err = controller.client.CreatePersistentVolume(newVolume) - glog.V(5).Infof("Unprovisioned PersistentVolume[%s] created for PVC[%s], which will be fulfilled in the storage provider", newVolume.Name, claim.Name) - if err != nil { - return fmt.Errorf("PersistentVolumeClaim[%s] failed provisioning: %+v", claim.Name, err) - } - - claim.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue - _, err = controller.client.UpdatePersistentVolumeClaim(claim) - if err != nil { - glog.Errorf("error updating persistent volume claim: %v", err) - } - - return nil -} - -func (controller *PersistentVolumeProvisionerController) reconcileVolume(pv *api.PersistentVolume) error { - glog.V(5).Infof("PersistentVolume[%s] reconciling", pv.Name) - - // The PV may have been modified by parallel call to reconcileVolume, load - // the current version. - newPv, err := controller.client.GetPersistentVolume(pv.Name) - if err != nil { - return fmt.Errorf("Cannot reload volume %s: %v", pv.Name, err) - } - pv = newPv - - if pv.Spec.ClaimRef == nil { - glog.V(5).Infof("PersistentVolume[%s] is not bound to a claim. No provisioning required", pv.Name) - return nil - } - - // TODO: fix this leaky abstraction. Had to make our own store key because ClaimRef fails the default keyfunc (no Meta on object). - obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)) - if !exists { - return fmt.Errorf("PersistentVolumeClaim[%s/%s] not found in local cache", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) - } - - claim, ok := obj.(*api.PersistentVolumeClaim) - if !ok { - return fmt.Errorf("PersistentVolumeClaim expected, but got %v", obj) - } - - // no provisioning required, volume is ready and Bound - if !keyExists(pvProvisioningRequiredAnnotationKey, pv.Annotations) { - glog.V(5).Infof("PersistentVolume[%s] does not require provisioning", pv.Name) - return nil - } - - // provisioning is completed, volume is ready. - if isProvisioningComplete(pv) { - glog.V(5).Infof("PersistentVolume[%s] is bound and provisioning is complete", pv.Name) - if pv.Spec.ClaimRef.Namespace != claim.Namespace || pv.Spec.ClaimRef.Name != claim.Name { - return fmt.Errorf("pre-bind mismatch - expected %s but found %s/%s", claimToClaimKey(claim), pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) - } - return nil - } - - // provisioning is incomplete. Attempt to provision the volume. - glog.V(5).Infof("PersistentVolume[%s] provisioning in progress", pv.Name) - err = provisionVolume(pv, controller) - if err != nil { - return fmt.Errorf("Error provisioning PersistentVolume[%s]: %v", pv.Name, err) - } - - return nil -} - -// provisionVolume provisions a volume that has been created in the cluster but not yet fulfilled by -// the storage provider. -func provisionVolume(pv *api.PersistentVolume, controller *PersistentVolumeProvisionerController) error { - if isProvisioningComplete(pv) { - return fmt.Errorf("PersistentVolume[%s] is already provisioned", pv.Name) - } - - if _, exists := pv.Annotations[qosProvisioningKey]; !exists { - return fmt.Errorf("PersistentVolume[%s] does not contain a provisioning request. 
Provisioning not required.", pv.Name) - } - - if controller.provisioner == nil { - return fmt.Errorf("No provisioner found for volume: %s", pv.Name) - } - - // Find the claim in local cache - obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)) - if !exists { - return fmt.Errorf("Could not find PersistentVolumeClaim[%s/%s] in local cache", pv.Spec.ClaimRef.Name, pv.Name) - } - claim := obj.(*api.PersistentVolumeClaim) - - provisioner, _ := controller.newProvisioner(controller.provisioner, claim, pv) - err := provisioner.Provision(pv) - if err != nil { - glog.Errorf("Could not provision %s", pv.Name) - pv.Status.Phase = api.VolumeFailed - pv.Status.Message = err.Error() - if pv, apiErr := controller.client.UpdatePersistentVolumeStatus(pv); apiErr != nil { - return fmt.Errorf("PersistentVolume[%s] failed provisioning and also failed status update: %v - %v", pv.Name, err, apiErr) - } - return fmt.Errorf("PersistentVolume[%s] failed provisioning: %v", pv.Name, err) - } - - clone, err := conversion.NewCloner().DeepCopy(pv) - volumeClone, ok := clone.(*api.PersistentVolume) - if !ok { - return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone) - } - volumeClone.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue - - pv, err = controller.client.UpdatePersistentVolume(volumeClone) - if err != nil { - // TODO: https://github.com/kubernetes/kubernetes/issues/14443 - // the volume was created in the infrastructure and likely has a PV name on it, - // but we failed to save the annotation that marks the volume as provisioned. - return fmt.Errorf("Error updating PersistentVolume[%s] with provisioning completed annotation. There is a potential for dupes and orphans.", volumeClone.Name) - } - return nil -} - -// Run starts all of this controller's control loops -func (controller *PersistentVolumeProvisionerController) Run() { - glog.V(5).Infof("Starting PersistentVolumeProvisionerController\n") - if controller.stopChannels == nil { - controller.stopChannels = make(map[string]chan struct{}) - } - - if _, exists := controller.stopChannels[volumesStopChannel]; !exists { - controller.stopChannels[volumesStopChannel] = make(chan struct{}) - go controller.volumeController.Run(controller.stopChannels[volumesStopChannel]) - } - - if _, exists := controller.stopChannels[claimsStopChannel]; !exists { - controller.stopChannels[claimsStopChannel] = make(chan struct{}) - go controller.claimController.Run(controller.stopChannels[claimsStopChannel]) - } -} - -// Stop gracefully shuts down this controller -func (controller *PersistentVolumeProvisionerController) Stop() { - glog.V(5).Infof("Stopping PersistentVolumeProvisionerController\n") - for name, stopChan := range controller.stopChannels { - close(stopChan) - delete(controller.stopChannels, name) - } -} - -func (controller *PersistentVolumeProvisionerController) newProvisioner(plugin volume.ProvisionableVolumePlugin, claim *api.PersistentVolumeClaim, pv *api.PersistentVolume) (volume.Provisioner, error) { - tags := make(map[string]string) - tags[cloudVolumeCreatedForClaimNamespaceTag] = claim.Namespace - tags[cloudVolumeCreatedForClaimNameTag] = claim.Name - - // pv can be nil when the provisioner has not created the PV yet - if pv != nil { - tags[cloudVolumeCreatedForVolumeNameTag] = pv.Name - } - - volumeOptions := volume.VolumeOptions{ - Capacity: claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)], - AccessModes: 
claim.Spec.AccessModes, - PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, - CloudTags: &tags, - ClusterName: controller.clusterName, - } - - if pv != nil { - volumeOptions.PVName = pv.Name - } - - provisioner, err := plugin.NewProvisioner(volumeOptions) - return provisioner, err -} - -// controllerClient abstracts access to PVs and PVCs. Easy to mock for testing and wrap for real client. -type controllerClient interface { - CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) - ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) - WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) - GetPersistentVolume(name string) (*api.PersistentVolume, error) - UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) - DeletePersistentVolume(volume *api.PersistentVolume) error - UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) - - GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) - ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) - WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) - UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - - // provided to give VolumeHost and plugins access to the kube client - GetKubeClient() clientset.Interface -} - -func NewControllerClient(c clientset.Interface) controllerClient { - return &realControllerClient{c} -} - -var _ controllerClient = &realControllerClient{} - -type realControllerClient struct { - client clientset.Interface -} - -func (c *realControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Get(name) -} - -func (c *realControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) { - return c.client.Core().PersistentVolumes().List(options) -} - -func (c *realControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) { - return c.client.Core().PersistentVolumes().Watch(options) -} - -func (c *realControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Create(pv) -} - -func (c *realControllerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Update(volume) -} - -func (c *realControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error { - return c.client.Core().PersistentVolumes().Delete(volume.Name, nil) -} - -func (c *realControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().UpdateStatus(volume) -} - -func (c *realControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(namespace).Get(name) -} - -func (c *realControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) { - return c.client.Core().PersistentVolumeClaims(namespace).List(options) -} - -func (c *realControllerClient) 
WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) { - return c.client.Core().PersistentVolumeClaims(namespace).Watch(options) -} - -func (c *realControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim) -} - -func (c *realControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim) -} - -func (c *realControllerClient) GetKubeClient() clientset.Interface { - return c.client -} - -func keyExists(key string, haystack map[string]string) bool { - _, exists := haystack[key] - return exists -} - -func isProvisioningComplete(pv *api.PersistentVolume) bool { - return isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, pv.Annotations) -} - -func isAnnotationMatch(key, needle string, haystack map[string]string) bool { - value, exists := haystack[key] - if !exists { - return false - } - return value == needle -} - -func isRecyclable(policy api.PersistentVolumeReclaimPolicy) bool { - return policy == api.PersistentVolumeReclaimDelete || policy == api.PersistentVolumeReclaimRecycle -} - -// VolumeHost implementation -// PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes. -// Because no mounting is performed, most of the VolumeHost methods are not implemented. -func (c *PersistentVolumeProvisionerController) GetPluginDir(podUID string) string { - return "" -} - -func (c *PersistentVolumeProvisionerController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string { - return "" -} - -func (c *PersistentVolumeProvisionerController) GetPodPluginDir(podUID types.UID, pluginName string) string { - return "" -} - -func (c *PersistentVolumeProvisionerController) GetKubeClient() clientset.Interface { - return c.client.GetKubeClient() -} - -func (c *PersistentVolumeProvisionerController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { - return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation") -} - -func (c *PersistentVolumeProvisionerController) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) { - return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation") -} - -func (c *PersistentVolumeProvisionerController) GetCloudProvider() cloudprovider.Interface { - return c.cloud -} - -func (c *PersistentVolumeProvisionerController) GetMounter() mount.Interface { - return nil -} - -func (c *PersistentVolumeProvisionerController) GetWriter() io.Writer { - return nil -} - -func (c *PersistentVolumeProvisionerController) GetHostName() string { - return "" -} - -const ( - // these pair of constants are used by the provisioner. - // The key is a kube namespaced key that denotes a volume requires provisioning. - // The value is set only when provisioning is completed. Any other value will tell the provisioner - // that provisioning has not yet occurred. 
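The two annotation constants defined just below, together with qosProvisioningKey, drove the deleted provisioning handshake: reconcileClaim created a PV pre-bound via ClaimRef and marked "provisioning-required", and reconcileVolume later fulfilled it and flipped the annotation to "provisioning-completed". A compressed sketch of that flow, reusing the deleted identifiers with most error handling omitted (illustrative only, this helper does not exist in the patch):

    func provisionIfRequested(c controllerClient, claim *api.PersistentVolumeClaim, p volume.Provisioner) error {
        // Claims opt in by carrying the storage-class annotation.
        if _, ok := claim.Annotations[qosProvisioningKey]; !ok {
            return nil
        }
        // reconcileClaim: create a PV pre-bound to the claim and marked as
        // still requiring provisioning, then stamp the claim as completed.
        pv, _ := p.NewPersistentVolumeTemplate()
        ref, _ := api.GetReference(claim)
        pv.Spec.ClaimRef = ref
        pv.Annotations[pvProvisioningRequiredAnnotationKey] = "true"
        if _, err := c.CreatePersistentVolume(pv); err != nil {
            return err
        }
        claim.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
        _, err := c.UpdatePersistentVolumeClaim(claim)
        // reconcileVolume: on a later sync the PV still carries
        // "provisioning-required", so Provision(pv) is called and the
        // annotation is switched to "provisioning-completed".
        return err
    }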
- pvProvisioningRequiredAnnotationKey = "volume.experimental.kubernetes.io/provisioning-required" - pvProvisioningCompletedAnnotationValue = "volume.experimental.kubernetes.io/provisioning-completed" -) diff --git a/pkg/controller/persistentvolume/persistentvolume_provisioner_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_provisioner_controller_test.go deleted file mode 100644 index c72e8e4473e..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_provisioner_controller_test.go +++ /dev/null @@ -1,295 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "testing" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/testapi" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" - "k8s.io/kubernetes/pkg/util" - volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/kubernetes/pkg/watch" -) - -func TestProvisionerRunStop(t *testing.T) { - controller, _, _ := makeTestController() - - if len(controller.stopChannels) != 0 { - t.Errorf("Non-running provisioner should not have any stopChannels. Got %v", len(controller.stopChannels)) - } - - controller.Run() - - if len(controller.stopChannels) != 2 { - t.Errorf("Running provisioner should have exactly 2 stopChannels. Got %v", len(controller.stopChannels)) - } - - controller.Stop() - - if len(controller.stopChannels) != 0 { - t.Errorf("Non-running provisioner should not have any stopChannels. 
Got %v", len(controller.stopChannels)) - } -} - -func makeTestVolume() *api.PersistentVolume { - return &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - Annotations: map[string]string{}, - Name: "pv01", - }, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: "/somepath/data01", - }, - }, - }, - } -} - -func makeTestClaim() *api.PersistentVolumeClaim { - return &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ - Annotations: map[string]string{}, - Name: "claim01", - Namespace: "ns", - SelfLink: testapi.Default.SelfLink("pvc", ""), - }, - Spec: api.PersistentVolumeClaimSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("8G"), - }, - }, - }, - } -} - -func makeTestController() (*PersistentVolumeProvisionerController, *mockControllerClient, *volumetest.FakeVolumePlugin) { - mockClient := &mockControllerClient{} - mockVolumePlugin := &volumetest.FakeVolumePlugin{} - controller, _ := NewPersistentVolumeProvisionerController(mockClient, 1*time.Second, "fake-kubernetes", nil, mockVolumePlugin, &fake_cloud.FakeCloud{}) - return controller, mockClient, mockVolumePlugin -} - -func TestReconcileClaim(t *testing.T) { - controller, mockClient, _ := makeTestController() - pvc := makeTestClaim() - - // watch would have added the claim to the store - controller.claimStore.Add(pvc) - // store it in fake API server - mockClient.UpdatePersistentVolumeClaim(pvc) - - err := controller.reconcileClaim(pvc) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // non-provisionable PVC should not have created a volume on reconciliation - if mockClient.volume != nil { - t.Error("Unexpected volume found in mock client. Expected nil") - } - - pvc.Annotations[qosProvisioningKey] = "foo" - // store it in fake API server - mockClient.UpdatePersistentVolumeClaim(pvc) - - err = controller.reconcileClaim(pvc) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // PVC requesting provisioning should have a PV created for it - if mockClient.volume == nil { - t.Error("Expected to find bound volume but got nil") - } - - if mockClient.volume.Spec.ClaimRef.Name != pvc.Name { - t.Errorf("Expected PV to be bound to %s but got %s", mockClient.volume.Spec.ClaimRef.Name, pvc.Name) - } - - // the PVC should have correct annotation - if mockClient.claim.Annotations[pvProvisioningRequiredAnnotationKey] != pvProvisioningCompletedAnnotationValue { - t.Errorf("Annotation %q not set", pvProvisioningRequiredAnnotationKey) - } - - // Run the syncClaim 2nd time to simulate periodic sweep running in parallel - // to the previous syncClaim. There is a lock in handleUpdateVolume(), so - // they will be called sequentially, but the second call will have old - // version of the claim. - oldPVName := mockClient.volume.Name - - // Make the "old" claim - pvc2 := makeTestClaim() - pvc2.Annotations[qosProvisioningKey] = "foo" - // Add a dummy annotation so we recognize the claim was updated (i.e. 
- // stored in mockClient) - pvc2.Annotations["test"] = "test" - - err = controller.reconcileClaim(pvc2) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // The 2nd PVC should be ignored, no new PV was created - if val, found := pvc2.Annotations[pvProvisioningRequiredAnnotationKey]; found { - t.Errorf("2nd PVC got unexpected annotation %q: %q", pvProvisioningRequiredAnnotationKey, val) - } - if mockClient.volume.Name != oldPVName { - t.Errorf("2nd PVC unexpectedly provisioned a new volume") - } - if _, found := mockClient.claim.Annotations["test"]; found { - t.Errorf("2nd PVC was unexpectedly updated") - } -} - -func checkTagValue(t *testing.T, tags map[string]string, tag string, expectedValue string) { - value, found := tags[tag] - if !found || value != expectedValue { - t.Errorf("Expected tag value %s = %s but value %s found", tag, expectedValue, value) - } -} - -func TestReconcileVolume(t *testing.T) { - - controller, mockClient, mockVolumePlugin := makeTestController() - pv := makeTestVolume() - pvc := makeTestClaim() - mockClient.volume = pv - - err := controller.reconcileVolume(pv) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - - // watch adds claim to the store. - // we need to add it to our mock client to mimic normal Get call - controller.claimStore.Add(pvc) - mockClient.claim = pvc - - // pretend the claim and volume are bound, no provisioning required - claimRef, _ := api.GetReference(pvc) - pv.Spec.ClaimRef = claimRef - mockClient.volume = pv - err = controller.reconcileVolume(pv) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - - pv.Annotations[pvProvisioningRequiredAnnotationKey] = "!pvProvisioningCompleted" - pv.Annotations[qosProvisioningKey] = "foo" - mockClient.volume = pv - err = controller.reconcileVolume(pv) - - if !isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations) { - t.Errorf("Expected %s but got %s", pvProvisioningRequiredAnnotationKey, mockClient.volume.Annotations[pvProvisioningRequiredAnnotationKey]) - } - - // Check that the volume plugin was called with correct tags - tags := *mockVolumePlugin.LastProvisionerOptions.CloudTags - checkTagValue(t, tags, cloudVolumeCreatedForClaimNamespaceTag, pvc.Namespace) - checkTagValue(t, tags, cloudVolumeCreatedForClaimNameTag, pvc.Name) - checkTagValue(t, tags, cloudVolumeCreatedForVolumeNameTag, pv.Name) - -} - -var _ controllerClient = &mockControllerClient{} - -type mockControllerClient struct { - volume *api.PersistentVolume - claim *api.PersistentVolumeClaim -} - -func (c *mockControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) { - return c.volume, nil -} - -func (c *mockControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) { - if pv.GenerateName != "" && pv.Name == "" { - pv.Name = fmt.Sprintf(pv.GenerateName, util.NewUUID()) - } - c.volume = pv - return c.volume, nil -} - -func (c *mockControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) { - return &api.PersistentVolumeList{ - Items: []api.PersistentVolume{*c.volume}, - }, nil -} - -func (c *mockControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) { - return watch.NewFake(), nil -} - -func (c *mockControllerClient) UpdatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.CreatePersistentVolume(pv) -} - -func (c *mockControllerClient) 
DeletePersistentVolume(volume *api.PersistentVolume) error { - c.volume = nil - return nil -} - -func (c *mockControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return volume, nil -} - -func (c *mockControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) { - if c.claim != nil { - return c.claim, nil - } else { - return nil, errors.NewNotFound(api.Resource("persistentvolumes"), name) - } -} - -func (c *mockControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) { - return &api.PersistentVolumeClaimList{ - Items: []api.PersistentVolumeClaim{*c.claim}, - }, nil -} - -func (c *mockControllerClient) WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) { - return watch.NewFake(), nil -} - -func (c *mockControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - c.claim = claim - return c.claim, nil -} - -func (c *mockControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - return claim, nil -} - -func (c *mockControllerClient) GetKubeClient() clientset.Interface { - return nil -} diff --git a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go b/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go deleted file mode 100644 index e73a5b9ebc7..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go +++ /dev/null @@ -1,415 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - ioutil "k8s.io/kubernetes/pkg/util/io" - "k8s.io/kubernetes/pkg/util/metrics" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/watch" -) - -var _ volume.VolumeHost = &PersistentVolumeRecycler{} - -// PersistentVolumeRecycler is a controller that watches for PersistentVolumes that are released from their claims. -// This controller will Recycle those volumes whose reclaim policy is set to PersistentVolumeReclaimRecycle and make them -// available again for a new claim. -type PersistentVolumeRecycler struct { - volumeController *framework.Controller - stopChannel chan struct{} - client recyclerClient - kubeClient clientset.Interface - pluginMgr volume.VolumePluginMgr - cloud cloudprovider.Interface - maximumRetry int - syncPeriod time.Duration - // Local cache of failed recycle / delete operations. Map volume.Name -> status of the volume. 
- // Only PVs in Released state have an entry here. - releasedVolumes map[string]releasedVolumeStatus -} - -// releasedVolumeStatus holds state of failed delete/recycle operation on a -// volume. The controller re-tries the operation several times and it stores -// retry count + timestamp of the last attempt here. -type releasedVolumeStatus struct { - // How many recycle/delete operations failed. - retryCount int - // Timestamp of the last attempt. - lastAttempt time.Time -} - -// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler -func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, maximumRetry int, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) { - recyclerClient := NewRecyclerClient(kubeClient) - if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("pv_recycler_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) - } - recycler := &PersistentVolumeRecycler{ - client: recyclerClient, - kubeClient: kubeClient, - cloud: cloud, - maximumRetry: maximumRetry, - syncPeriod: syncPeriod, - releasedVolumes: make(map[string]releasedVolumeStatus), - } - - if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil { - return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err) - } - - _, volumeController := framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumes().Watch(options) - }, - }, - &api.PersistentVolume{}, - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - pv, ok := obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Error casting object to PersistentVolume: %v", obj) - return - } - recycler.reclaimVolume(pv) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - pv, ok := newObj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Error casting object to PersistentVolume: %v", newObj) - return - } - recycler.reclaimVolume(pv) - }, - DeleteFunc: func(obj interface{}) { - pv, ok := obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Error casting object to PersistentVolume: %v", obj) - return - } - recycler.reclaimVolume(pv) - recycler.removeReleasedVolume(pv) - }, - }, - ) - - recycler.volumeController = volumeController - return recycler, nil -} - -// shouldRecycle checks a volume and returns nil, if the volume should be -// recycled right now. Otherwise it returns an error with reason why it should -// not be recycled. -func (recycler *PersistentVolumeRecycler) shouldRecycle(pv *api.PersistentVolume) error { - if pv.Spec.ClaimRef == nil { - return fmt.Errorf("Volume does not have a reference to claim") - } - if pv.Status.Phase != api.VolumeReleased { - return fmt.Errorf("The volume is not in 'Released' phase") - } - - // The volume is Released, should we retry recycling? - status, found := recycler.releasedVolumes[pv.Name] - if !found { - // We don't know anything about this volume. The controller has been - // restarted or the volume has been marked as Released by another - // controller. Recycle/delete this volume as if it was just Released. 
- glog.V(5).Infof("PersistentVolume[%s] not found in local cache, recycling", pv.Name) - return nil - } - - // Check the timestamp - expectedRetry := status.lastAttempt.Add(recycler.syncPeriod) - if time.Now().After(expectedRetry) { - glog.V(5).Infof("PersistentVolume[%s] retrying recycle after timeout", pv.Name) - return nil - } - // It's too early - glog.V(5).Infof("PersistentVolume[%s] skipping recycle, it's too early: now: %v, next retry: %v", pv.Name, time.Now(), expectedRetry) - return fmt.Errorf("Too early after previous failure") -} - -func (recycler *PersistentVolumeRecycler) reclaimVolume(pv *api.PersistentVolume) error { - glog.V(5).Infof("Recycler: checking PersistentVolume[%s]\n", pv.Name) - // Always load the latest version of the volume - newPV, err := recycler.client.GetPersistentVolume(pv.Name) - if err != nil { - return fmt.Errorf("Could not find PersistentVolume %s", pv.Name) - } - pv = newPV - - err = recycler.shouldRecycle(pv) - if err == nil { - glog.V(5).Infof("Reclaiming PersistentVolume[%s]\n", pv.Name) - - // both handleRecycle and handleDelete block until completion - // TODO: allow parallel recycling operations to increase throughput - switch pv.Spec.PersistentVolumeReclaimPolicy { - case api.PersistentVolumeReclaimRecycle: - err = recycler.handleRecycle(pv) - case api.PersistentVolumeReclaimDelete: - err = recycler.handleDelete(pv) - case api.PersistentVolumeReclaimRetain: - glog.V(5).Infof("Volume %s is set to retain after release. Skipping.\n", pv.Name) - default: - err = fmt.Errorf("No PersistentVolumeReclaimPolicy defined for spec: %+v", pv) - } - if err != nil { - errMsg := fmt.Sprintf("Could not recycle volume spec: %+v", err) - glog.Errorf(errMsg) - return fmt.Errorf(errMsg) - } - return nil - } - glog.V(3).Infof("PersistentVolume[%s] phase %s - skipping: %v", pv.Name, pv.Status.Phase, err) - return nil -} - -// handleReleaseFailure evaluates a failed Recycle/Delete operation, updates -// internal controller state with new nr. of attempts and timestamp of the last -// attempt. Based on the number of failures it returns the next state of the -// volume (Released / Failed). -func (recycler *PersistentVolumeRecycler) handleReleaseFailure(pv *api.PersistentVolume) api.PersistentVolumePhase { - status, found := recycler.releasedVolumes[pv.Name] - if !found { - // First failure, set retryCount to 0 (will be inceremented few lines below) - status = releasedVolumeStatus{} - } - status.retryCount += 1 - - if status.retryCount > recycler.maximumRetry { - // This was the last attempt. Remove any internal state and mark the - // volume as Failed. 
- glog.V(3).Infof("PersistentVolume[%s] failed %d times - marking Failed", pv.Name, status.retryCount) - recycler.removeReleasedVolume(pv) - return api.VolumeFailed - } - - status.lastAttempt = time.Now() - recycler.releasedVolumes[pv.Name] = status - return api.VolumeReleased -} - -func (recycler *PersistentVolumeRecycler) removeReleasedVolume(pv *api.PersistentVolume) { - delete(recycler.releasedVolumes, pv.Name) -} - -func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume) error { - glog.V(5).Infof("Recycling PersistentVolume[%s]\n", pv.Name) - - currentPhase := pv.Status.Phase - nextPhase := currentPhase - - spec := volume.NewSpecFromPersistentVolume(pv, false) - plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec) - if err != nil { - nextPhase = api.VolumeFailed - pv.Status.Message = fmt.Sprintf("%v", err) - } - - // an error above means a suitable plugin for this volume was not found. - // we don't need to attempt recycling when plugin is nil, but we do need to persist the next/failed phase - // of the volume so that subsequent syncs won't attempt recycling through this handler func. - if plugin != nil { - volRecycler, err := plugin.NewRecycler(spec) - if err != nil { - return fmt.Errorf("Could not obtain Recycler for spec: %#v error: %v", spec, err) - } - // blocks until completion - if err := volRecycler.Recycle(); err != nil { - glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err) - pv.Status.Message = fmt.Sprintf("Recycling error: %s", err) - nextPhase = recycler.handleReleaseFailure(pv) - } else { - glog.V(5).Infof("PersistentVolume[%s] successfully recycled\n", pv.Name) - // The volume has been recycled. Remove any internal state to make - // any subsequent bind+recycle cycle working. - recycler.removeReleasedVolume(pv) - nextPhase = api.VolumePending - } - } - - if currentPhase != nextPhase { - glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) - pv.Status.Phase = nextPhase - _, err := recycler.client.UpdatePersistentVolumeStatus(pv) - if err != nil { - // Rollback to previous phase - pv.Status.Phase = currentPhase - } - } - - return nil -} - -func (recycler *PersistentVolumeRecycler) handleDelete(pv *api.PersistentVolume) error { - glog.V(5).Infof("Deleting PersistentVolume[%s]\n", pv.Name) - - currentPhase := pv.Status.Phase - nextPhase := currentPhase - - spec := volume.NewSpecFromPersistentVolume(pv, false) - plugin, err := recycler.pluginMgr.FindDeletablePluginBySpec(spec) - if err != nil { - nextPhase = api.VolumeFailed - pv.Status.Message = fmt.Sprintf("%v", err) - } - - // an error above means a suitable plugin for this volume was not found. - // we don't need to attempt deleting when plugin is nil, but we do need to persist the next/failed phase - // of the volume so that subsequent syncs won't attempt deletion through this handler func. 
- if plugin != nil { - deleter, err := plugin.NewDeleter(spec) - if err != nil { - return fmt.Errorf("Could not obtain Deleter for spec: %#v error: %v", spec, err) - } - // blocks until completion - err = deleter.Delete() - if err != nil { - glog.Errorf("PersistentVolume[%s] failed deletion: %+v", pv.Name, err) - pv.Status.Message = fmt.Sprintf("Deletion error: %s", err) - nextPhase = recycler.handleReleaseFailure(pv) - } else { - glog.V(5).Infof("PersistentVolume[%s] successfully deleted through plugin\n", pv.Name) - recycler.removeReleasedVolume(pv) - // after successful deletion through the plugin, we can also remove the PV from the cluster - if err := recycler.client.DeletePersistentVolume(pv); err != nil { - return fmt.Errorf("error deleting persistent volume: %+v", err) - } - } - } - - if currentPhase != nextPhase { - glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) - pv.Status.Phase = nextPhase - _, err := recycler.client.UpdatePersistentVolumeStatus(pv) - if err != nil { - // Rollback to previous phase - pv.Status.Phase = currentPhase - } - } - - return nil -} - -// Run starts this recycler's control loops -func (recycler *PersistentVolumeRecycler) Run() { - glog.V(5).Infof("Starting PersistentVolumeRecycler\n") - if recycler.stopChannel == nil { - recycler.stopChannel = make(chan struct{}) - go recycler.volumeController.Run(recycler.stopChannel) - } -} - -// Stop gracefully shuts down this binder -func (recycler *PersistentVolumeRecycler) Stop() { - glog.V(5).Infof("Stopping PersistentVolumeRecycler\n") - if recycler.stopChannel != nil { - close(recycler.stopChannel) - recycler.stopChannel = nil - } -} - -// recyclerClient abstracts access to PVs -type recyclerClient interface { - GetPersistentVolume(name string) (*api.PersistentVolume, error) - UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) - DeletePersistentVolume(volume *api.PersistentVolume) error - UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) -} - -func NewRecyclerClient(c clientset.Interface) recyclerClient { - return &realRecyclerClient{c} -} - -type realRecyclerClient struct { - client clientset.Interface -} - -func (c *realRecyclerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Get(name) -} - -func (c *realRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().Update(volume) -} - -func (c *realRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error { - return c.client.Core().PersistentVolumes().Delete(volume.Name, nil) -} - -func (c *realRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) { - return c.client.Core().PersistentVolumes().UpdateStatus(volume) -} - -// PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes. -// Because no mounting is performed, most of the VolumeHost methods are not implemented. 
-func (f *PersistentVolumeRecycler) GetPluginDir(podUID string) string { - return "" -} - -func (f *PersistentVolumeRecycler) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string { - return "" -} - -func (f *PersistentVolumeRecycler) GetPodPluginDir(podUID types.UID, pluginName string) string { - return "" -} - -func (f *PersistentVolumeRecycler) GetKubeClient() clientset.Interface { - return f.kubeClient -} - -func (f *PersistentVolumeRecycler) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { - return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation") -} - -func (f *PersistentVolumeRecycler) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) { - return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation") -} - -func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface { - return f.cloud -} - -func (f *PersistentVolumeRecycler) GetMounter() mount.Interface { - return nil -} - -func (f *PersistentVolumeRecycler) GetWriter() ioutil.Writer { - return nil -} - -func (f *PersistentVolumeRecycler) GetHostName() string { - return "" -} diff --git a/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go deleted file mode 100644 index 8312fd32210..00000000000 --- a/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go +++ /dev/null @@ -1,265 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package persistentvolume - -import ( - "fmt" - "testing" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/host_path" - volumetest "k8s.io/kubernetes/pkg/volume/testing" -) - -const ( - mySyncPeriod = 2 * time.Second - myMaximumRetry = 3 -) - -func TestFailedRecycling(t *testing.T) { - pv := preparePV() - - mockClient := &mockBinderClient{ - volume: pv, - } - - // no Init called for pluginMgr and no plugins are available. Volume should fail recycling. 
- plugMgr := volume.VolumePluginMgr{} - - recycler := &PersistentVolumeRecycler{ - kubeClient: fake.NewSimpleClientset(), - client: mockClient, - pluginMgr: plugMgr, - releasedVolumes: make(map[string]releasedVolumeStatus), - } - - err := recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("Unexpected non-nil error: %v", err) - } - - if mockClient.volume.Status.Phase != api.VolumeFailed { - t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase) - } - - // Use a new volume for the next test - pv = preparePV() - mockClient.volume = pv - - pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete - err = recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("Unexpected non-nil error: %v", err) - } - - if mockClient.volume.Status.Phase != api.VolumeFailed { - t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase) - } -} - -func TestRecyclingRetry(t *testing.T) { - // Test that recycler controller retries to recycle a volume several times, which succeeds eventually - pv := preparePV() - - mockClient := &mockBinderClient{ - volume: pv, - } - - plugMgr := volume.VolumePluginMgr{} - // Use a fake NewRecycler function - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newFailingMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)) - // Reset a global call counter - failedCallCount = 0 - - recycler := &PersistentVolumeRecycler{ - kubeClient: fake.NewSimpleClientset(), - client: mockClient, - pluginMgr: plugMgr, - syncPeriod: mySyncPeriod, - maximumRetry: myMaximumRetry, - releasedVolumes: make(map[string]releasedVolumeStatus), - } - - // All but the last attempt will fail - testRecycleFailures(t, recycler, mockClient, pv, myMaximumRetry-1) - - // The last attempt should succeed - err := recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("Last step: Recycler failed: %v", err) - } - - if mockClient.volume.Status.Phase != api.VolumePending { - t.Errorf("Last step: The volume should be Pending, but is %s instead", mockClient.volume.Status.Phase) - } - // Check the cache, it should not have any entry - status, found := recycler.releasedVolumes[pv.Name] - if found { - t.Errorf("Last step: Expected PV to be removed from cache, got %v", status) - } -} - -func TestRecyclingRetryAlwaysFail(t *testing.T) { - // Test that recycler controller retries to recycle a volume several times, which always fails. 
- pv := preparePV() - - mockClient := &mockBinderClient{ - volume: pv, - } - - plugMgr := volume.VolumePluginMgr{} - // Use a fake NewRecycler function - plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newAlwaysFailingMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)) - // Reset a global call counter - failedCallCount = 0 - - recycler := &PersistentVolumeRecycler{ - kubeClient: fake.NewSimpleClientset(), - client: mockClient, - pluginMgr: plugMgr, - syncPeriod: mySyncPeriod, - maximumRetry: myMaximumRetry, - releasedVolumes: make(map[string]releasedVolumeStatus), - } - - // myMaximumRetry recycle attempts will fail - testRecycleFailures(t, recycler, mockClient, pv, myMaximumRetry) - - // The volume should be failed after myMaximumRetry attempts - err := recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("Last step: Recycler failed: %v", err) - } - - if mockClient.volume.Status.Phase != api.VolumeFailed { - t.Errorf("Last step: The volume should be Failed, but is %s instead", mockClient.volume.Status.Phase) - } - // Check the cache, it should not have any entry - status, found := recycler.releasedVolumes[pv.Name] - if found { - t.Errorf("Last step: Expected PV to be removed from cache, got %v", status) - } -} - -func preparePV() *api.PersistentVolume { - return &api.PersistentVolume{ - Spec: api.PersistentVolumeSpec{ - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"), - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: "/tmp/data02", - }, - }, - PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle, - ClaimRef: &api.ObjectReference{ - Name: "foo", - Namespace: "bar", - }, - }, - Status: api.PersistentVolumeStatus{ - Phase: api.VolumeReleased, - }, - } -} - -// Test that `count` attempts to recycle a PV fails. -func testRecycleFailures(t *testing.T, recycler *PersistentVolumeRecycler, mockClient *mockBinderClient, pv *api.PersistentVolume, count int) { - for i := 1; i <= count; i++ { - err := recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("STEP %d: Recycler faled: %v", i, err) - } - - // Check the status, it should be failed - if mockClient.volume.Status.Phase != api.VolumeReleased { - t.Errorf("STEP %d: The volume should be Released, but is %s instead", i, mockClient.volume.Status.Phase) - } - - // Check the failed volume cache - status, found := recycler.releasedVolumes[pv.Name] - if !found { - t.Errorf("STEP %d: cannot find released volume status", i) - } - if status.retryCount != i { - t.Errorf("STEP %d: Expected nr. of attempts to be %d, got %d", i, i, status.retryCount) - } - - // call reclaimVolume too early, it should not increment the retryCount - time.Sleep(mySyncPeriod / 2) - err = recycler.reclaimVolume(pv) - if err != nil { - t.Errorf("STEP %d: Recycler failed: %v", i, err) - } - - status, found = recycler.releasedVolumes[pv.Name] - if !found { - t.Errorf("STEP %d: cannot find released volume status", i) - } - if status.retryCount != i { - t.Errorf("STEP %d: Expected nr. 
of attempts to be %d, got %d", i, i, status.retryCount) - } - - // Call the next reclaimVolume() after full pvRecycleRetryPeriod - time.Sleep(mySyncPeriod / 2) - } -} - -func newFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) { - return &failingMockRecycler{ - path: spec.PersistentVolume.Spec.HostPath.Path, - errorCount: myMaximumRetry - 1, // fail two times and then successfully recycle the volume - }, nil -} - -func newAlwaysFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) { - return &failingMockRecycler{ - path: spec.PersistentVolume.Spec.HostPath.Path, - errorCount: 1000, // always fail - }, nil -} - -type failingMockRecycler struct { - path string - // How many times should the recycler fail before returning success. - errorCount int - volume.MetricsNil -} - -// Counter of failingMockRecycler.Recycle() calls. Global variable just for -// testing. It's too much code to create a custom volume plugin, which would -// hold this variable. -var failedCallCount = 0 - -func (r *failingMockRecycler) GetPath() string { - return r.path -} - -func (r *failingMockRecycler) Recycle() error { - failedCallCount += 1 - if failedCallCount <= r.errorCount { - return fmt.Errorf("Failing for %d. time", failedCallCount) - } - // return nil means recycle passed - return nil -} From b86e5923b20bd1057daae0a76f7472c6a214aebe Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:03 +0200 Subject: [PATCH 02/34] Rename types.go to persistentvolume_index.go With some changes: - make some method private, nobody seems to use them. - adapt to framework.NewIndexerInformer instead of using custom cache. --- .../{types.go => persistentvolume_index.go} | 49 ++++++------------- .../persistentvolume_index_test.go | 43 +++++++++------- 2 files changed, 38 insertions(+), 54 deletions(-) rename pkg/controller/persistentvolume/{types.go => persistentvolume_index.go} (78%) diff --git a/pkg/controller/persistentvolume/types.go b/pkg/controller/persistentvolume/persistentvolume_index.go similarity index 78% rename from pkg/controller/persistentvolume/types.go rename to pkg/controller/persistentvolume/persistentvolume_index.go index 42ca3680180..c323ef49744 100644 --- a/pkg/controller/persistentvolume/types.go +++ b/pkg/controller/persistentvolume/persistentvolume_index.go @@ -24,34 +24,9 @@ import ( "k8s.io/kubernetes/pkg/client/cache" ) -const ( - // A PVClaim can request a quality of service tier by adding this annotation. The value of the annotation - // is arbitrary. The values are pre-defined by a cluster admin and known to users when requesting a QoS. - // For example tiers might be gold, silver, and tin and the admin configures what that means for each volume plugin that can provision a volume. - // Values in the alpha version of this feature are not meaningful, but will be in the full version of this feature. - qosProvisioningKey = "volume.alpha.kubernetes.io/storage-class" - // Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) - // with namespace of a persistent volume claim used to create this volume. - cloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace" - // Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) - // with name of a persistent volume claim used to create this volume. 
- cloudVolumeCreatedForClaimNameTag = "kubernetes.io/created-for/pvc/name" - // Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) - // with name of appropriate Kubernetes persistent volume . - cloudVolumeCreatedForVolumeNameTag = "kubernetes.io/created-for/pv/name" -) - // persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity. type persistentVolumeOrderedIndex struct { - cache.Indexer -} - -var _ cache.Store = &persistentVolumeOrderedIndex{} // persistentVolumeOrderedIndex is a Store - -func NewPersistentVolumeOrderedIndex() *persistentVolumeOrderedIndex { - return &persistentVolumeOrderedIndex{ - cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc}), - } + store cache.Indexer } // accessModesIndexFunc is an indexing function that returns a persistent volume's AccessModes as a string @@ -63,15 +38,15 @@ func accessModesIndexFunc(obj interface{}) ([]string, error) { return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj) } -// ListByAccessModes returns all volumes with the given set of AccessModeTypes *in order* of their storage capacity (low to high) -func (pvIndex *persistentVolumeOrderedIndex) ListByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) { +// listByAccessModes returns all volumes with the given set of AccessModeTypes *in order* of their storage capacity (low to high) +func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) { pv := &api.PersistentVolume{ Spec: api.PersistentVolumeSpec{ AccessModes: modes, }, } - objs, err := pvIndex.Index("accessmodes", pv) + objs, err := pvIndex.store.Index("accessmodes", pv) if err != nil { return nil, err } @@ -101,7 +76,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes) for _, modes := range allPossibleModes { - volumes, err := pvIndex.ListByAccessModes(modes) + volumes, err := pvIndex.listByAccessModes(modes) if err != nil { return nil, err } @@ -123,10 +98,14 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo } } - // a claim requesting provisioning will have an exact match pre-bound to the claim. - // no need to search through unbound volumes. The matching volume will be created by the provisioner - // and will match above when the claim is re-processed by the binder. - if keyExists(qosProvisioningKey, claim.Annotations) { + // We want to provision volumes if the annotation is set even if there + // is matching PV. Therefore, do not look for available PV and let + // a new volume to be provisioned. + // + // When provisioner creates a new PV to this claim, an exact match + // pre-bound to the claim will be found by the checks above during + // subsequent claim sync. 
+ if hasAnnotation(claim.ObjectMeta, annClass) { return nil, nil } @@ -213,7 +192,7 @@ func matchStorageCapacity(pvA, pvB *api.PersistentVolume) bool { // func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []api.PersistentVolumeAccessMode) [][]api.PersistentVolumeAccessMode { matchedModes := [][]api.PersistentVolumeAccessMode{} - keys := pvIndex.Indexer.ListIndexFuncValues("accessmodes") + keys := pvIndex.store.ListIndexFuncValues("accessmodes") for _, key := range keys { indexedModes := api.GetAccessModesFromString(key) if containedInAll(indexedModes, requestedModes) { diff --git a/pkg/controller/persistentvolume/persistentvolume_index_test.go b/pkg/controller/persistentvolume/persistentvolume_index_test.go index 7bb6c5387bd..61134f13700 100644 --- a/pkg/controller/persistentvolume/persistentvolume_index_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_index_test.go @@ -22,12 +22,17 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" ) +func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex { + return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})} +} + func TestMatchVolume(t *testing.T) { - volList := NewPersistentVolumeOrderedIndex() + volList := newPersistentVolumeOrderedIndex() for _, pv := range createTestVolumes() { - volList.Add(pv) + volList.store.Add(pv) } scenarios := map[string]struct { @@ -122,7 +127,7 @@ func TestMatchVolume(t *testing.T) { } func TestMatchingWithBoundVolumes(t *testing.T) { - volumeIndex := NewPersistentVolumeOrderedIndex() + volumeIndex := newPersistentVolumeOrderedIndex() // two similar volumes, one is bound pv1 := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ @@ -158,8 +163,8 @@ func TestMatchingWithBoundVolumes(t *testing.T) { }, } - volumeIndex.Add(pv1) - volumeIndex.Add(pv2) + volumeIndex.store.Add(pv1) + volumeIndex.store.Add(pv2) claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ @@ -189,12 +194,12 @@ func TestMatchingWithBoundVolumes(t *testing.T) { } func TestSort(t *testing.T) { - volList := NewPersistentVolumeOrderedIndex() + volList := newPersistentVolumeOrderedIndex() for _, pv := range createTestVolumes() { - volList.Add(pv) + volList.store.Add(pv) } - volumes, err := volList.ListByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}) + volumes, err := volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}) if err != nil { t.Error("Unexpected error retrieving volumes by access modes:", err) } @@ -205,7 +210,7 @@ func TestSort(t *testing.T) { } } - volumes, err = volList.ListByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany}) + volumes, err = volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany}) if err != nil { t.Error("Unexpected error retrieving volumes by access modes:", err) } @@ -218,9 +223,9 @@ func TestSort(t *testing.T) { } func TestAllPossibleAccessModes(t *testing.T) { - index := NewPersistentVolumeOrderedIndex() + index := newPersistentVolumeOrderedIndex() for _, pv := range createTestVolumes() { - index.Add(pv) + index.store.Add(pv) } // the mock PVs creates contain 2 types of accessmodes: RWO+ROX and RWO+ROW+RWX @@ -292,10 +297,10 @@ func TestFindingVolumeWithDifferentAccessModes(t 
*testing.T) { }, } - index := NewPersistentVolumeOrderedIndex() - index.Add(gce) - index.Add(ebs) - index.Add(nfs) + index := newPersistentVolumeOrderedIndex() + index.store.Add(gce) + index.store.Add(ebs) + index.store.Add(nfs) volume, _ := index.findBestMatchForClaim(claim) if volume.Name != ebs.Name { @@ -521,10 +526,10 @@ func TestFindingPreboundVolumes(t *testing.T) { pv5 := testVolume("pv5", "5Gi") pv8 := testVolume("pv8", "8Gi") - index := NewPersistentVolumeOrderedIndex() - index.Add(pv1) - index.Add(pv5) - index.Add(pv8) + index := newPersistentVolumeOrderedIndex() + index.store.Add(pv1) + index.store.Add(pv5) + index.store.Add(pv8) // expected exact match on size volume, _ := index.findBestMatchForClaim(claim) From 71aa892a86300716c94ac43a45d6a76f3d06f063 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:04 +0200 Subject: [PATCH 03/34] Implement volume controller skeleton. This is a simple controller that watches changes of PersistentVolumes and PersistentVolumeClaims. --- .../app/controllermanager.go | 25 +- .../controllermanager/controllermanager.go | 24 +- .../persistentvolume_controller.go | 244 ++++++++++++++++++ 3 files changed, 251 insertions(+), 42 deletions(-) create mode 100644 pkg/controller/persistentvolume/persistentvolume_controller.go diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index fb6d54355f9..0f5c46b5d77 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -373,38 +373,21 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig } } - volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration) provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration) if err != nil { glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. 
This functionality is considered an early Alpha version.") } - pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration) - pvclaimBinder.Run() - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) - - pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler( - clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")), + volumeController := persistentvolumecontroller.NewPersistentVolumeController( + clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry), + provisioner, ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, ) - if err != nil { - glog.Fatalf("Failed to start persistent volume recycler: %+v", err) - } - pvRecycler.Run() + volumeController.Run() time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) - if provisioner != nil { - pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud) - if err != nil { - glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err) - } - pvController.Run() - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) - } - go volume.NewAttachDetachController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, ResyncPeriod(s)()). Run(wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index ea7cc158f33..46d9c48ea9e 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -271,37 +271,19 @@ func (s *CMServer) Run(_ []string) error { } } - volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration) provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration) if err != nil { glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. 
This functionality is considered an early Alpha version.") } - pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder( + volumeController := persistentvolumecontroller.NewPersistentVolumeController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration, - ) - pvclaimBinder.Run() - - pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler( - clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")), - s.PVClaimBinderSyncPeriod.Duration, - int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry), + provisioner, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, ) - if err != nil { - glog.Fatalf("Failed to start persistent volume recycler: %+v", err) - } - pvRecycler.Run() - - if provisioner != nil { - pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud) - if err != nil { - glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err) - } - pvController.Run() - } + volumeController.Run() var rootCA []byte diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go new file mode 100644 index 00000000000..c7f40a15adc --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + vol "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" +) + +// PersistentVolumeController is a controller that synchronizes +// PersistentVolumeClaims and PersistentVolumes. It starts two +// framework.Controllers that watch PerstentVolume and PersistentVolumeClaim +// changes. 
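+// The controller keeps local caches of both kinds of objects: an ordered
+// index of volumes (so the smallest suitable volume can be picked for a
+// claim) and a plain store of claims; both are populated by the informers
+// created in initializeController().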
+type PersistentVolumeController struct { + volumes persistentVolumeOrderedIndex + volumeController *framework.Controller + volumeControllerStopCh chan struct{} + claims cache.Store + claimController *framework.Controller + claimControllerStopCh chan struct{} + kubeClient clientset.Interface +} + +// NewPersistentVolumeController creates a new PersistentVolumeController +func NewPersistentVolumeController( + kubeClient clientset.Interface, + syncPeriod time.Duration, + provisioner vol.ProvisionableVolumePlugin, + recyclers []vol.VolumePlugin, + cloud cloudprovider.Interface) *PersistentVolumeController { + + controller := &PersistentVolumeController{ + kubeClient: kubeClient, + } + + volumeSource := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumes().Watch(options) + }, + } + + claimSource := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) + }, + } + + controller.initializeController(syncPeriod, volumeSource, claimSource) + + return controller +} + +// initializeController prepares watching for PersistentVolume and +// PersistentVolumeClaim events from given sources. This should be used to +// initialize the controller for real operation (with real event sources) and +// also during testing (with fake ones). +func (ctrl *PersistentVolumeController) initializeController(syncPeriod time.Duration, volumeSource, claimSource cache.ListerWatcher) { + glog.V(4).Infof("initializing PersistentVolumeController, sync every %s", syncPeriod.String()) + ctrl.volumes.store, ctrl.volumeController = framework.NewIndexerInformer( + volumeSource, + &api.PersistentVolume{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addVolume, + UpdateFunc: ctrl.updateVolume, + DeleteFunc: ctrl.deleteVolume, + }, + cache.Indexers{"accessmodes": accessModesIndexFunc}, + ) + ctrl.claims, ctrl.claimController = framework.NewInformer( + claimSource, + &api.PersistentVolumeClaim{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addClaim, + UpdateFunc: ctrl.updateClaim, + DeleteFunc: ctrl.deleteClaim, + }, + ) +} + +// addVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { + pv, ok := obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("expected PersistentVolume but handler received %+v", obj) + return + } + if err := ctrl.syncVolume(pv); err != nil { + glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } +} + +// updateVolume is callback from framework.Controller watching PersistentVolume +// events. 
+func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { + newVolume, ok := newObj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) + return + } + if err := ctrl.syncVolume(newVolume); err != nil { + glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } +} + +// deleteVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { + // Intentionally left blank - we do not react on deleted volumes +} + +// addClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { + claim, ok := obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) + return + } + if err := ctrl.syncClaim(claim); err != nil { + glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } +} + +// updateClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { + newClaim, ok := newObj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) + return + } + if err := ctrl.syncClaim(newClaim); err != nil { + glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } +} + +// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { + var volume *api.PersistentVolume + var claim *api.PersistentVolumeClaim + var ok bool + + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) + return + } + } + + if !ok || claim == nil { + return + } + + if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { + if volume, ok = pvObj.(*api.PersistentVolume); ok { + // sync the volume when its claim is deleted. Explicitly sync'ing the + // volume here in response to claim deletion prevents the volume from + // waiting until the next sync period for its Release. 
+ if volume != nil { + err := ctrl.syncVolume(volume) + if err != nil { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } + } + } else { + glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) + } + } +} + +func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { + glog.V(4).Infof("synchronizing PersistentVolume[%s], current phase: %s", volume.Name, volume.Status.Phase) + + return nil +} + +func (ctrl *PersistentVolumeController) syncClaim(claim *api.PersistentVolumeClaim) error { + glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s], current phase: %s", claim.Name, claim.Status.Phase) + + return nil +} + +// Run starts all of this controller's control loops +func (ctrl *PersistentVolumeController) Run() { + glog.V(4).Infof("starting PersistentVolumeController") + + if ctrl.volumeControllerStopCh == nil { + ctrl.volumeControllerStopCh = make(chan struct{}) + go ctrl.volumeController.Run(ctrl.volumeControllerStopCh) + } + + if ctrl.claimControllerStopCh == nil { + ctrl.claimControllerStopCh = make(chan struct{}) + go ctrl.claimController.Run(ctrl.claimControllerStopCh) + } +} + +// Stop gracefully shuts down this controller +func (ctrl *PersistentVolumeController) Stop() { + glog.V(4).Infof("stopping PersistentVolumeController") + close(ctrl.volumeControllerStopCh) + close(ctrl.claimControllerStopCh) +} From 20305f9235a738ecfaaab74da117bd5ad9f41b55 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:05 +0200 Subject: [PATCH 04/34] Don't process events until fully initialized. We do not want to process any volume / claim events until both PV and claim caches are fully loaded. --- .../persistentvolume_controller.go | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index c7f40a15adc..485a92dd4f2 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -112,6 +112,10 @@ func (ctrl *PersistentVolumeController) initializeController(syncPeriod time.Dur // addVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + pv, ok := obj.(*api.PersistentVolume) if !ok { glog.Errorf("expected PersistentVolume but handler received %+v", obj) @@ -125,6 +129,10 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { // updateVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + newVolume, ok := newObj.(*api.PersistentVolume) if !ok { glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) @@ -144,6 +152,10 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { // addClaim is callback from framework.Controller watching PersistentVolumeClaim // events. 
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + claim, ok := obj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) @@ -157,6 +169,10 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { // updateClaim is callback from framework.Controller watching PersistentVolumeClaim // events. func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + newClaim, ok := newObj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) @@ -170,6 +186,10 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) // deleteClaim is callback from framework.Controller watching PersistentVolumeClaim // events. func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + var volume *api.PersistentVolume var claim *api.PersistentVolumeClaim var ok bool @@ -242,3 +262,11 @@ func (ctrl *PersistentVolumeController) Stop() { close(ctrl.volumeControllerStopCh) close(ctrl.claimControllerStopCh) } + +// isFullySynced returns true, if both volume and claim caches are fully loaded +// after startup. +// We do not want to process events with not fully loaded caches - e.g. we might +// recycle/delete PVs that don't have corresponding claim in the cache yet. +func (ctrl *PersistentVolumeController) isFullySynced() bool { + return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() +} From a195802d3ee778bcdbd0ab8e22cb35cce66b2a62 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:06 +0200 Subject: [PATCH 05/34] Make standalone function to check for (pre-)bound volumes. Note the semantic change, we now check for UID="" --- .../persistentvolume_controller.go | 16 ++++++++++++++++ .../persistentvolume/persistentvolume_index.go | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 485a92dd4f2..1cda746f289 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -270,3 +270,19 @@ func (ctrl *PersistentVolumeController) Stop() { func (ctrl *PersistentVolumeController) isFullySynced() bool { return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() } + +// isVolumeBoundToClaim returns true, if given volume is pre-bound or bound +// to specific claim. Both claim.Name and claim.Namespace must be equal. +// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. 
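+// In other words, a volume pre-bound by the user with only the claim name and
+// namespace filled in (and an empty UID) is still considered bound to a
+// matching claim; the UID is checked only once the controller has filled it
+// in.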
+func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool { + if volume.Spec.ClaimRef == nil { + return false + } + if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { + return false + } + if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { + return false + } + return true +} diff --git a/pkg/controller/persistentvolume/persistentvolume_index.go b/pkg/controller/persistentvolume/persistentvolume_index.go index c323ef49744..a86bf7c2b45 100644 --- a/pkg/controller/persistentvolume/persistentvolume_index.go +++ b/pkg/controller/persistentvolume/persistentvolume_index.go @@ -92,8 +92,8 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo continue } - if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace && claim.UID == volume.Spec.ClaimRef.UID { - // exact match! No search required. + if isVolumeBoundToClaim(volume, claim) { + // Exact match! No search required. return volume, nil } } From e620bfc9ccb9e4f4d838d5bd0d0c3c543ab01ba7 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:07 +0200 Subject: [PATCH 06/34] Add unit test framework. It's quite complicated one, see subsequent commits for usage. --- .../persistentvolume_framework_test.go | 463 ++++++++++++++++++ 1 file changed, 463 insertions(+) create mode 100644 pkg/controller/persistentvolume/persistentvolume_framework_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go new file mode 100644 index 00000000000..17727a31bc1 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -0,0 +1,463 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" + "testing" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/diff" +) + +// This is a unit test framework for persistent volume controller. +// It fills the controller with test claims/volumes and can simulate these +// scenarios: +// 1) Call syncClaim/syncVolume once. +// 2) Call syncClaim/syncVolume several times (both simulating "claim/volume +// modified" events and periodic sync), until the controller settles down and +// does not modify anything. 
+// 3) Simulate almost real API server/etcd and call add/update/delete +// volume/claim. +// In all these scenarios, when the test finishes, the framework can compare +// resulting claims/volumes with list of expected claims/volumes and report +// differences. + +// controllerTest contains a single controller test input. +// Each test has initial set of volumes and claims that are filled into the +// controller before the test starts. The test then contains a reference to +// function to call as the actual test. Available functions are: +// - testSyncClaim - calls syncClaim on the first claim in initialClaims. +// - testSyncClaimError - calls syncClaim on the first claim in initialClaims +// and expects an error to be returned. +// - testSyncVolume - calls syncVolume on the first volume in initialVolumes. +// - any custom function for specialized tests. +// The test then contains list of volumes/claims that are expected at the end +// of the test. +type controllerTest struct { + // Name of the test, for logging + name string + // Initial content of controller volume cache. + initialVolumes []*api.PersistentVolume + // Expected content of controller volume cache at the end of the test. + expectedVolumes []*api.PersistentVolume + // Initial content of controller claim cache. + initialClaims []*api.PersistentVolumeClaim + // Expected content of controller claim cache at the end of the test. + expectedClaims []*api.PersistentVolumeClaim + // Function to call as the test. + test testCall +} + +type testCall func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error + +const testNamespace = "default" + +var versionConflictError = errors.New("VersionError") +var novolumes []*api.PersistentVolume +var noclaims []*api.PersistentVolumeClaim + +// volumeReactor is a core.Reactor that simulates etcd and API server. It +// stores: +// - Latest version of claims volumes saved by the controller. +// - Queue of all saves (to simulate "volume/claim updated" events). This queue +// contains all intermediate state of an object - e.g. a claim.VolumeName +// is updated first and claim.Phase second. This queue will then contain both +// updates as separate entries. +// - Number of changes since the last call to volumeReactor.syncAll(). +// - Optionally, volume and claim event sources. When set, all changed +// volumes/claims are sent as Modify event to these sources. These sources can +// be linked back to the controller watcher as "volume/claim updated" events. +type volumeReactor struct { + volumes map[string]*api.PersistentVolume + claims map[string]*api.PersistentVolumeClaim + changedObjects []interface{} + changedSinceLastSync int + ctrl *PersistentVolumeController + volumeSource *framework.FakeControllerSource + claimSource *framework.FakeControllerSource + lock sync.Mutex +} + +// React is a callback called by fake kubeClient from the controller. +// In other words, every claim/volume change performed by the controller ends +// here. +// This callback checks versions of the updated objects and refuse those that +// are too old (simulating real etcd). +// All updated objects are stored locally to keep track of object versions and +// to evaluate test results. +// All updated objects are also inserted into changedObjects queue and +// optionally sent back to the controller via its watchers. 
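+// Actions other than updates of PersistentVolumes and PersistentVolumeClaims
+// are reported as not handled and are left to other reactors, if any are
+// registered.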
+func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) { + r.lock.Lock() + defer r.lock.Unlock() + + glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) + + switch { + case action.Matches("update", "persistentvolumes"): + obj := action.(core.UpdateAction).GetObject() + volume := obj.(*api.PersistentVolume) + + // Check and bump object version + storedVolume, found := r.volumes[volume.Name] + if found { + storedVer, _ := strconv.Atoi(storedVolume.ResourceVersion) + requestedVer, _ := strconv.Atoi(volume.ResourceVersion) + if storedVer != requestedVer { + return true, obj, versionConflictError + } + volume.ResourceVersion = strconv.Itoa(storedVer + 1) + } + + // Store the updated object to appropriate places. + if r.volumeSource != nil { + r.volumeSource.Modify(volume) + } + r.volumes[volume.Name] = volume + r.changedObjects = append(r.changedObjects, volume) + r.changedSinceLastSync++ + glog.V(4).Infof("saved updated volume %s", volume.Name) + return true, volume, nil + + case action.Matches("update", "persistentvolumeclaims"): + obj := action.(core.UpdateAction).GetObject() + claim := obj.(*api.PersistentVolumeClaim) + + // Check and bump object version + storedClaim, found := r.claims[claim.Name] + if found { + storedVer, _ := strconv.Atoi(storedClaim.ResourceVersion) + requestedVer, _ := strconv.Atoi(claim.ResourceVersion) + if storedVer != requestedVer { + return true, obj, versionConflictError + } + claim.ResourceVersion = strconv.Itoa(storedVer + 1) + } + + // Store the updated object to appropriate places. + r.claims[claim.Name] = claim + if r.claimSource != nil { + r.claimSource.Modify(claim) + } + r.changedObjects = append(r.changedObjects, claim) + r.changedSinceLastSync++ + glog.V(4).Infof("saved updated claim %s", claim.Name) + return true, claim, nil + } + return false, nil, nil +} + +// checkVolumes compares all expectedVolumes with set of volumes at the end of +// the test and reports differences. +func (r *volumeReactor) checkVolumes(t *testing.T, expectedVolumes []*api.PersistentVolume) error { + r.lock.Lock() + defer r.lock.Unlock() + + expectedMap := make(map[string]*api.PersistentVolume) + gotMap := make(map[string]*api.PersistentVolume) + // Clear any ResourceVersion from both sets + for _, v := range expectedVolumes { + v.ResourceVersion = "" + expectedMap[v.Name] = v + } + for _, v := range r.volumes { + // We must clone the volume because of golang race check - it was + // written by the controller without any locks on it. + clone, _ := conversion.NewCloner().DeepCopy(v) + v = clone.(*api.PersistentVolume) + v.ResourceVersion = "" + if v.Spec.ClaimRef != nil { + v.Spec.ClaimRef.ResourceVersion = "" + } + gotMap[v.Name] = v + } + if !reflect.DeepEqual(expectedMap, gotMap) { + // Print ugly but useful diff of expected and received objects for + // easier debugging. + return fmt.Errorf("Volume check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap)) + } + return nil +} + +// checkClaims compares all expectedClaims with set of claims at the end of the +// test and reports differences. 
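+// ResourceVersions are cleared on both sides before comparing, so tests do
+// not need to predict how many times the controller saved an object.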
+func (r *volumeReactor) checkClaims(t *testing.T, expectedClaims []*api.PersistentVolumeClaim) error { + r.lock.Lock() + defer r.lock.Unlock() + + expectedMap := make(map[string]*api.PersistentVolumeClaim) + gotMap := make(map[string]*api.PersistentVolumeClaim) + for _, c := range expectedClaims { + c.ResourceVersion = "" + expectedMap[c.Name] = c + } + for _, c := range r.claims { + // We must clone the claim because of golang race check - it was + // written by the controller without any locks on it. + clone, _ := conversion.NewCloner().DeepCopy(c) + c = clone.(*api.PersistentVolumeClaim) + c.ResourceVersion = "" + gotMap[c.Name] = c + } + if !reflect.DeepEqual(expectedMap, gotMap) { + // Print ugly but useful diff of expected and received objects for + // easier debugging. + return fmt.Errorf("Claim check failed [A-expected, B-got result]: %s", diff.ObjectDiff(expectedMap, gotMap)) + } + return nil +} + +// popChange returns one recorded updated object, either *api.PersistentVolume +// or *api.PersistentVolumeClaim. Returns nil when there are no changes. +func (r *volumeReactor) popChange() interface{} { + r.lock.Lock() + defer r.lock.Unlock() + + if len(r.changedObjects) == 0 { + return nil + } + + // For debugging purposes, print the queue + for _, obj := range r.changedObjects { + switch obj.(type) { + case *api.PersistentVolume: + vol, _ := obj.(*api.PersistentVolume) + glog.V(4).Infof("reactor queue: %s", vol.Name) + case *api.PersistentVolumeClaim: + claim, _ := obj.(*api.PersistentVolumeClaim) + glog.V(4).Infof("reactor queue: %s", claim.Name) + } + } + + // Pop the first item from the queue and return it + obj := r.changedObjects[0] + r.changedObjects = r.changedObjects[1:] + return obj +} + +// syncAll simulates the controller periodic sync of volumes and claim. It +// simply adds all these objects to the internal queue of updates. This method +// should be used when the test manually calls syncClaim/syncVolume. Test that +// use real controller loop (ctrl.Run()) will get periodic sync automatically. 
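+// It also resets the changedSinceLastSync counter, so a subsequent
+// getChangeCount() reports only changes made after this sync.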
+func (r *volumeReactor) syncAll() { + r.lock.Lock() + defer r.lock.Unlock() + + for _, c := range r.claims { + r.changedObjects = append(r.changedObjects, c) + } + for _, v := range r.volumes { + r.changedObjects = append(r.changedObjects, v) + } + r.changedSinceLastSync = 0 +} + +func (r *volumeReactor) getChangeCount() int { + r.lock.Lock() + defer r.lock.Unlock() + return r.changedSinceLastSync +} + +func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource, claimSource *framework.FakeControllerSource) *volumeReactor { + reactor := &volumeReactor{ + volumes: make(map[string]*api.PersistentVolume), + claims: make(map[string]*api.PersistentVolumeClaim), + ctrl: ctrl, + volumeSource: volumeSource, + claimSource: claimSource, + } + client.AddReactor("*", "*", reactor.React) + return reactor +} + +func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVolumeController { + ctrl := &PersistentVolumeController{ + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(cache.MetaNamespaceKeyFunc), + kubeClient: kubeClient, + } + return ctrl +} + +// newVolume returns a new volume with given attributes +func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, annotations ...string) *api.PersistentVolume { + volume := api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: name, + ResourceVersion: "1", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(capacity), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + }, + Status: api.PersistentVolumeStatus{ + Phase: phase, + }, + } + + if boundToClaimName != "" { + volume.Spec.ClaimRef = &api.ObjectReference{ + Kind: "PersistentVolumeClaim", + APIVersion: "v1", + UID: types.UID(boundToClaimUID), + Namespace: testNamespace, + Name: boundToClaimName, + } + } + + if len(annotations) > 0 { + volume.Annotations = make(map[string]string) + for _, a := range annotations { + volume.Annotations[a] = "yes" + } + } + + return &volume +} + +// newVolumeArray returns array with a single volume that would be returned by +// newVolume() with the same parameters. 
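+// Most tests need exactly one initial/expected volume, so this saves building
+// the one-element slice by hand.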
+func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, annotations ...string) []*api.PersistentVolume { + return []*api.PersistentVolume{ + newVolume(name, capacity, boundToClaimUID, boundToClaimName, phase, annotations...), + } +} + +// newClaim returns a new claim with given attributes +func newClaim(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) *api.PersistentVolumeClaim { + claim := api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: testNamespace, + UID: types.UID(claimUID), + ResourceVersion: "1", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(capacity), + }, + }, + VolumeName: boundToVolume, + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: phase, + }, + } + // Make sure api.GetReference(claim) works + claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name) + + if len(annotations) > 0 { + claim.Annotations = make(map[string]string) + for _, a := range annotations { + claim.Annotations[a] = "yes" + } + } + return &claim +} + +// newClaimArray returns array with a single claim that would be returned by +// newClaim() with the same parameters. +func newClaimArray(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) []*api.PersistentVolumeClaim { + return []*api.PersistentVolumeClaim{ + newClaim(name, claimUID, capacity, boundToVolume, phase, annotations...), + } +} + +func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + return ctrl.syncClaim(test.initialClaims[0]) +} + +func testSyncClaimError(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + err := ctrl.syncClaim(test.initialClaims[0]) + + if err != nil { + return nil + } + return fmt.Errorf("syncClaim succeeded when failure was expected") +} + +func testSyncVolume(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + return ctrl.syncVolume(test.initialVolumes[0]) +} + +func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest, t *testing.T) { + // Evaluate results + if err := reactor.checkClaims(t, test.expectedClaims); err != nil { + t.Errorf("Test %q: %v", test.name, err) + + } + if err := reactor.checkVolumes(t, test.expectedVolumes); err != nil { + t.Errorf("Test %q: %v", test.name, err) + } +} + +// Test single call to syncClaim and syncVolume methods. +// For all tests: +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncClaim/syncVolume) via +// controllerTest.testCall *once*. +// 3. Compare resulting volumes and claims with expected volumes and claims. 
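+// Each test gets a fresh fake clientset, controller and reactor, so the tests
+// are independent of each other.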
+func runSyncTests(t *testing.T, tests []controllerTest) { + for _, test := range tests { + glog.V(4).Infof("starting test %q", test.name) + + // Initialize the controller + client := &fake.Clientset{} + ctrl := newPersistentVolumeController(client) + reactor := newVolumeReactor(client, ctrl, nil, nil) + for _, claim := range test.initialClaims { + ctrl.claims.Add(claim) + reactor.claims[claim.Name] = claim + } + for _, volume := range test.initialVolumes { + ctrl.volumes.store.Add(volume) + reactor.volumes[volume.Name] = volume + } + + // Run the tested functions + err := test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q failed: %v", test.name, err) + } + + evaluateTestResults(ctrl, reactor, test, t) + } +} From eff6b50b936e0d97f2674b1a6962ac9a00e55b17 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:08 +0200 Subject: [PATCH 07/34] Bind unbound claims in syncClaim. --- .../persistentvolume_controller.go | 433 +++++++++++++++++- .../persistentvolume_sync_test.go | 162 +++++++ 2 files changed, 591 insertions(+), 4 deletions(-) create mode 100644 pkg/controller/persistentvolume/persistentvolume_sync_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 1cda746f289..6b1126e4fb2 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -17,6 +17,7 @@ limitations under the License. package persistentvolume import ( + "fmt" "time" "k8s.io/kubernetes/pkg/api" @@ -24,6 +25,7 @@ import ( clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" vol "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/watch" @@ -31,6 +33,50 @@ import ( "github.com/golang/glog" ) +// Design: +// +// The fundamental key to this design is the bi-directional "pointer" between +// PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs), which is +// represented here as pvc.Spec.VolumeName and pv.Spec.ClaimRef. The bi-directionality +// is complicated to manage in a transactionless system, but without it we +// can't ensure sane behavior in the face of different forms of trouble. For +// example, a rogue HA controller instance could end up racing and making +// multiple bindings that are indistinguishable, resulting in potential data +// loss. +// +// This controller is designed to work in active-passive high availability mode. +// It *could* work also in active-active HA mode, all the object transitions are +// designed to cope with this, however performance could be lower as these two +// active controllers will step on each other toes frequently. +// +// This controller supports pre-bound (by the creator) objects in both +// directions: a PVC that wants a specific PV or a PV that is reserved for a +// specific PVC. +// +// The binding is two-step process. PV.Spec.ClaimRef is modified first and +// PVC.Spec.VolumeName second. At any point of this transaction, the PV or PVC +// can be modified by user or other controller or completelly deleted. Also, two +// (or more) controllers may try to bind different volumes to different claims +// at the same time. The controller must recover from any conflicts that may +// arise from these conditions. + +// annBindCompleted annotation applies to PVCs. 
It indicates that the lifecycle +// of the PVC has passed through the initial setup. This information changes how +// we interpret some observations of the state of the objects. Value of this +// annotation does not matter. +const annBindCompleted = "pv.kubernetes.io/bind-completed" + +// annBoundByController annotation applies to PVs and PVCs. It indicates that +// the binding (PV->PVC or PVC->PV) was installed by the controller. The +// absence of this annotation means the binding was done by the user (i.e. +// pre-bound). Value of this annotation does not matter. +const annBoundByController = "pv.kubernetes.io/bound-by-controller" + +// annClass annotation represents a new field which instructs dynamic +// provisioning to choose a particular storage class (aka profile). +// Value of this annotation should be empty. +const annClass = "volume.alpha.kubernetes.io/storage-class" + // PersistentVolumeController is a controller that synchronizes // PersistentVolumeClaims and PersistentVolumes. It starts two // framework.Controllers that watch PerstentVolume and PersistentVolumeClaim @@ -229,14 +275,157 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { } } -func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { - glog.V(4).Infof("synchronizing PersistentVolume[%s], current phase: %s", volume.Name, volume.Status.Phase) +// syncClaim is the main controller method to decide what to do with a claim. +// It's invoked by appropriate framework.Controller callbacks when a claim is +// created, updated or periodically synced. We do not differentiate between +// these events. +// For easier readability, it was split into syncUnboundClaim and syncBoundClaim +// methods. +func (ctrl *PersistentVolumeController) syncClaim(claim *api.PersistentVolumeClaim) error { + glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + if !hasAnnotation(claim.ObjectMeta, annBindCompleted) { + return ctrl.syncUnboundClaim(claim) + } else { + return ctrl.syncBoundClaim(claim) + } +} + +// syncUnboundClaim is the main controller method to decide what to do with an +// unbound claim. +func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVolumeClaim) error { + // This is a new PVC that has not completed binding + // OBSERVATION: pvc is "Pending" + if claim.Spec.VolumeName == "" { + // User did not care which PV they get. + // [Unit test set 1] + volume, err := ctrl.volumes.findBestMatchForClaim(claim) + if err != nil { + glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) + return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err) + } + if volume == nil { + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) + // No PV could be found + // OBSERVATION: pvc is "Pending", will retry + if hasAnnotation(claim.ObjectMeta, annClass) { + // TODO: provisioning + //plugin := findProvisionerPluginForPV(pv) // Need to flesh this out + //if plugin != nil { + //FIXME: left off here + // No match was found and provisioning was requested. + // + // maintain a map with the current provisioner goroutines that are running + // if the key is already present in the map, return + // + // launch the goroutine that: + // 1. calls plugin.Provision to make the storage asset + // 2. gets back a PV object (partially filled) + // 3. 
create the PV API object, with claimRef -> pvc + // 4. deletes itself from the map when it's done + // return + //} else { + // make an event calling out that no provisioner was configured + // return, try later? + //} + } + // Mark the claim as Pending and try to find a match in the next + // periodic syncClaim + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else /* pv != nil */ { + // Found a PV for this claim + // OBSERVATION: pvc is "Pending", pv is "Available" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) + if err = ctrl.bind(volume, claim); err != nil { + // On any error saving the volume or the claim, subsequent + // syncClaim will finish the binding. + return err + } + // OBSERVATION: claim is "Bound", pv is "Bound" + return nil + } + } else /* pvc.Spec.VolumeName != nil */ { + // [Unit test set 2] + // User asked for a specific PV. + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) + obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) + if err != nil { + return err + } + if !found { + // User asked for a PV that does not exist. + // OBSERVATION: pvc is "Pending" + // Retry later. + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else { + volume, ok := obj.(*api.PersistentVolume) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + if volume.Spec.ClaimRef == nil { + // User asked for a PV that is not claimed + // OBSERVATION: pvc is "Pending", pv is "Available" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // On any error saving the volume or the claim, subsequent + // syncClaim will finish the binding. + return err + } + // OBSERVATION: pvc is "Bound", pv is "Bound" + return nil + } else if isVolumeBoundToClaim(volume, claim) { + // User asked for a PV that is claimed by this PVC + // OBSERVATION: pvc is "Pending", pv is "Bound" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) + + // Finish the volume binding by adding claim UID. 
+ if err = ctrl.bind(volume, claim); err != nil { + return err + } + // OBSERVATION: pvc is "Bound", pv is "Bound" + return nil + } else { + // User asked for a PV that is claimed by someone else + // OBSERVATION: pvc is "Pending", pv is "Bound" + if !hasAnnotation(claim.ObjectMeta, annBoundByController) { + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) + // User asked for a specific PV, retry later + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else { + // This should never happen because someone had to remove + // annBindCompleted annotation on the claim. + otherClaimName := fmt.Sprintf("%s/%s", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), otherClaimName) + return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, otherClaimName) + } + } + } + } +} + +// syncBoundClaim is the main controller method to decide what to do with a +// bound claim. +func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolumeClaim) error { return nil } -func (ctrl *PersistentVolumeController) syncClaim(claim *api.PersistentVolumeClaim) error { - glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s], current phase: %s", claim.Name, claim.Status.Phase) +// syncVolume is the main controller method to decide what to do with a volume. +// It's invoked by appropriate framework.Controller callbacks when a volume is +// created, updated or periodically synced. We do not differentiate between +// these events. +func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) return nil } @@ -271,6 +460,242 @@ func (ctrl *PersistentVolumeController) isFullySynced() bool { return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() } +// updateClaimPhase saves new claim phase to API server. +func (ctrl *PersistentVolumeController) updateClaimPhase(claim *api.PersistentVolumeClaim, phase api.PersistentVolumeClaimPhase) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: set phase %s", claimToClaimKey(claim), phase) + if claim.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: phase %s already set", claimToClaimKey(claim), phase) + return claim, nil + } + + clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return nil, fmt.Errorf("Error cloning claim: %v", err) + } + claimClone, ok := clone.(*api.PersistentVolumeClaim) + if !ok { + return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone) + } + + claimClone.Status.Phase = phase + newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: set phase %s failed: %v", claimToClaimKey(claim), phase, err) + return newClaim, err + } + glog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) + return newClaim, nil +} + +// updateVolumePhase saves new volume phase to API server. 
+func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.PersistentVolume, phase api.PersistentVolumePhase) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) + if volume.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) + return volume, nil + } + + clone, err := conversion.NewCloner().DeepCopy(volume) + if err != nil { + return nil, fmt.Errorf("Error cloning claim: %v", err) + } + volumeClone, ok := clone.(*api.PersistentVolume) + if !ok { + return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone) + } + + volumeClone.Status.Phase = phase + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) + return newVol, err + } + glog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) + return newVol, err +} + +// bindVolumeToClaim modifes given volume to be bound to a claim and saves it to +// API server. The claim is not modified in this method! +func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) + + dirty := false + + // Check if the volume was already bound (either by user or by controller) + shouldSetBoundByController := false + if !isVolumeBoundToClaim(volume, claim) { + shouldSetBoundByController = true + } + + // The volume from method args can be pointing to watcher cache. We must not + // modify these, therefore create a copy. + clone, err := conversion.NewCloner().DeepCopy(volume) + if err != nil { + return nil, fmt.Errorf("Error cloning pv: %v", err) + } + volumeClone, ok := clone.(*api.PersistentVolume) + if !ok { + return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone) + } + + // Bind the volume to the claim if it is not bound yet + if volume.Spec.ClaimRef == nil || + volume.Spec.ClaimRef.Name != claim.Name || + volume.Spec.ClaimRef.Namespace != claim.Namespace || + volume.Spec.ClaimRef.UID != claim.UID { + + claimRef, err := api.GetReference(claim) + if err != nil { + return nil, fmt.Errorf("Unexpected error getting claim reference: %v", err) + } + volumeClone.Spec.ClaimRef = claimRef + dirty = true + } + + // Set annBoundByController if it is not set yet + if shouldSetBoundByController && !hasAnnotation(volumeClone.ObjectMeta, annBoundByController) { + setAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes") + dirty = true + } + + // Save the volume only if something was changed + if dirty { + glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volume.Name) + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volume.Name, claimToClaimKey(claim), err) + return newVol, err + } + glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) + return newVol, nil + } + + glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + return volume, nil +} + +// bindClaimToVolume modifes given claim to be bound to a volume and saves it to +// API server. The volume is not modified in this method! 
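+// It sets claim.Spec.VolumeName, marks the claim with annBoundByController
+// when the binding was decided by the controller, and makes sure
+// annBindCompleted is present; the claim is saved only when something
+// actually changed.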
+func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *api.PersistentVolumeClaim, volume *api.PersistentVolume) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) + + dirty := false + + // Check if the claim was already bound (either by controller or by user) + shouldSetBoundByController := false + if volume.Name != claim.Spec.VolumeName { + shouldSetBoundByController = true + } + + // The claim from method args can be pointing to watcher cache. We must not + // modify these, therefore create a copy. + clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return nil, fmt.Errorf("Error cloning claim: %v", err) + } + claimClone, ok := clone.(*api.PersistentVolumeClaim) + if !ok { + return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone) + } + + // Bind the claim to the volume if it is not bound yet + if claimClone.Spec.VolumeName != volume.Name { + claimClone.Spec.VolumeName = volume.Name + dirty = true + } + + // Set annBoundByController if it is not set yet + if shouldSetBoundByController && !hasAnnotation(claimClone.ObjectMeta, annBoundByController) { + setAnnotation(&claimClone.ObjectMeta, annBoundByController, "yes") + dirty = true + } + + // Set annBindCompleted if it is not set yet + if !hasAnnotation(claimClone.ObjectMeta, annBindCompleted) { + setAnnotation(&claimClone.ObjectMeta, annBindCompleted, "yes") + dirty = true + } + + if dirty { + glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) + return newClaim, err + } + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) + return newClaim, nil + } + + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) + return claim, nil +} + +// bind saves binding information both to the volume and the claim and marks +// both objects as Bound. Volume is saved first. +// It returns on first error, it's up to the caller to implement some retry +// mechanism. 
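+// A bind interrupted half way (e.g. by a controller crash or an API error) is
+// finished by a later syncClaim: the bind* helpers are idempotent and only
+// save objects that still need a change.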
+func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) error { + var err error + + glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) + + if volume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { + glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) + return err + } + + if volume, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil { + glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) + return err + } + + if claim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { + glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) + return err + } + + if _, err = ctrl.updateClaimPhase(claim, api.ClaimBound); err != nil { + glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err) + return err + } + + glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) + glog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + return nil +} + +func hasAnnotation(obj api.ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +func setAnnotation(obj *api.ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string { + everBound := hasAnnotation(claim.ObjectMeta, annBindCompleted) + boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController) + + return fmt.Sprintf("phase: %s, bound to: %q, wasEverBound: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, everBound, boundByController) +} + +func getVolumeStatusForLogging(volume *api.PersistentVolume) string { + boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController) + claimName := "" + if volume.Spec.ClaimRef != nil { + claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID) + } + return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController) +} + // isVolumeBoundToClaim returns true, if given volume is pre-bound or bound // to specific claim. Both claim.Name and claim.Namespace must be equal. // If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go new file mode 100644 index 00000000000..a886d50d2c3 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncClaim and syncVolume methods. +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncClaim/syncVolume) via +// controllerTest.testCall *once*. +// 3. Compare resulting volumes and claims with expected volumes and claims. +func TestSync(t *testing.T) { + tests := []controllerTest{ + // [Unit test set 1] User did not care which PV they get. + // Test the matching with no claim.Spec.VolumeName and with various + // volumes. + { + // syncClaim binds to a matching unbound volume. + "1-1 - successful bind", + newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, annBoundByController), + newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim does not do anything when there is no matching volume. + "1-2 - noop", + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending), + newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), + newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim resets claim.Status to Pending when there is no + // matching volume. + "1-3 - reset to Pending", + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending), + newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), + newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim binds claims to the smallest matching volume + "1-4 - smallest volume", + []*api.PersistentVolume{ + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending), + newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending), + }, + []*api.PersistentVolume{ + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending), + newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, annBoundByController), + }, + newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim binds a claim only to volume that points to it (by + // name), even though a smaller one is available. 
+ "1-5 - prebound volume by name - success", + []*api.PersistentVolume{ + newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending), + }, + []*api.PersistentVolume{ + newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending), + }, + newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim binds a claim only to volume that points to it (by + // UID), even though a smaller one is available. + "1-6 - prebound volume by UID - success", + []*api.PersistentVolume{ + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending), + }, + []*api.PersistentVolume{ + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending), + }, + newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim does not bind claim to a volume prebound to a claim with + // same name and different UID + "1-7 - prebound volume to different claim", + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending), + newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim completes binding - simulates controller crash after + // PV.ClaimRef is saved + "1-8 - complete bind after crash - PV bound", + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, annBoundByController), + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, annBoundByController), + newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim completes binding - simulates controller crash after + // PV.Status is saved + "1-9 - complete bind after crash - PV status saved", + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, annBoundByController), + newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + /* TODO: enable when syncClaim with annBindCompleted is implemented + controllerTest{ + // syncClaim completes binding - simulates controller crash after + // PVC.VolumeName is saved + "10 - complete bind after crash - PVC bound", + []*api.PersistentVolume{ + newVolume("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), + }, + []*api.PersistentVolume{ + newVolume("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), + }, + []*api.PersistentVolumeClaim{ + newClaim("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), + }, + []*api.PersistentVolumeClaim{ 
+ newClaim("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), + }, + testSyncClaim, + }, + */ + } + runSyncTests(t, tests) +} From 5949b956f5f91924758f35da62339cc517e5036c Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:09 +0200 Subject: [PATCH 08/34] Implement syncClaim with bound claims. --- .../persistentvolume_controller.go | 77 +++++++- .../persistentvolume_sync_test.go | 166 +++++++++++++++--- 2 files changed, 222 insertions(+), 21 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 6b1126e4fb2..b10b3b05660 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -417,7 +417,82 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo // syncBoundClaim is the main controller method to decide what to do with a // bound claim. func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolumeClaim) error { - return nil + // hasAnnotation(pvc, annBindCompleted) + // This PVC has previously been bound + // OBSERVATION: pvc is not "Pending" + // [Unit test set 3] + if claim.Spec.VolumeName == "" { + // Claim was bound before but not any more. + if claim.Status.Phase != api.ClaimLost { + // Log the error only once, when we enter 'Lost' phase + glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume reference lost!", claimToClaimKey(claim)) + } + // TODO: emit event and save reason + if _, err := ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + return err + } + return nil + } + obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) + if err != nil { + return err + } + if !found { + // Claim is bound to a non-existing volume. + if claim.Status.Phase != api.ClaimLost { + // Log the error only once, when we enter 'Lost' phase + glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q lost!", claimToClaimKey(claim), claim.Spec.VolumeName) + } + // TODO: emit event and save reason + if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + return err + } + return nil + } else { + volume, ok := obj.(*api.PersistentVolume) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + + glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, ctrl.getVolumeStatusForLogging(volume)) + if volume.Spec.ClaimRef == nil { + // Claim is bound but volume has come unbound. + // Or, a claim was bound and the controller has not received updated + // volume yet. We can't distinguish these cases. + // Bind the volume again and set all states to Bound. + glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // Objects not saved, next syncPV or syncClaim will try again + return err + } + return nil + } else if volume.Spec.ClaimRef.UID == claim.UID { + // All is well + // NOTE: syncPV can handle this so it can be left out. + // NOTE: bind() call here will do nothing in most cases as + // everything should be already set. 
+ glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // Objects not saved, next syncPV or syncClaim will try again + return err + } + return nil + } else { + // Claim is bound but volume has a different claimant. + // Set the claim phase to 'Lost', which is a terminal + // phase. + otherClaimName := fmt.Sprintf("%s/%s", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) + if claim.Status.Phase != api.ClaimLost { + // Log the error only once, when we enter 'Lost' phase + glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q bound to another claim %q, this claim is lost!", claimToClaimKey(claim), claim.Spec.VolumeName, otherClaimName) + } + // TODO: emit event and save reason + if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + return err + } + return nil + } + } } // syncVolume is the main controller method to decide what to do with a volume. diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index a886d50d2c3..5008a346e93 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -137,26 +137,152 @@ func TestSync(t *testing.T) { newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), testSyncClaim, }, - /* TODO: enable when syncClaim with annBindCompleted is implemented - controllerTest{ - // syncClaim completes binding - simulates controller crash after - // PVC.VolumeName is saved - "10 - complete bind after crash - PVC bound", - []*api.PersistentVolume{ - newVolume("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), - }, - []*api.PersistentVolume{ - newVolume("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), - }, - []*api.PersistentVolumeClaim{ - newClaim("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), - }, - []*api.PersistentVolumeClaim{ - newClaim("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), - }, - testSyncClaim, - }, - */ + { + // syncClaim completes binding - simulates controller crash after + // PVC.VolumeName is saved + "10 - complete bind after crash - PVC bound", + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), + newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + // [Unit test set 2] User asked for a specific PV. + // Test the binding when pv.ClaimRef is already set by controller or + // by user. + { + // syncClaim with claim pre-bound to a PV that does not exist + "2-1 - claim prebound to non-existing volume - noop", + novolumes, + novolumes, + newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), + newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that does not exist. 
+ // Check that the claim status is reset to Pending + "2-2 - claim prebound to non-existing volume - reset status", + novolumes, + novolumes, + newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound), + newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that exists and is + // unbound. Check it gets bound and no annBoundByController is set. + "2-3 - claim prebound to unbound volume", + newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, annBoundByController), + newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending), + newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted), + testSyncClaim, + }, + { + // claim with claim pre-bound to a PV that is pre-bound to the claim + // by name. Check it gets bound and no annBoundByController is set. + "2-4 - claim prebound to prebound volume by name", + newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending), + newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound), + newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending), + newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that is pre-bound to the + // claim by UID. Check it gets bound and no annBoundByController is + // set. + "2-5 - claim prebound to prebound volume by UID", + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending), + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound), + newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending), + newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that is bound to different + // claim. Check it's reset to Pending. + "2-6 - claim prebound to already bound volume", + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound), + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound), + newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound), + newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending), + testSyncClaim, + }, + { + // syncClaim with claim bound by controller to a PV that is bound to + // different claim. Check it throws an error. + "2-7 - claim bound by controller to already bound volume", + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound), + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound), + newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), + newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), + testSyncClaimError, + }, + // [Unit test set 3] Syncing bound claim + { + // syncClaim with claim bound and its claim.Spec.VolumeName is + // removed. Check it's marked as Lost. + "3-1 - bound claim with missing VolumeName", + novolumes, + novolumes, + newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim bound to non-exising volume. 
Check it's + // marked as Lost. + "3-2 - bound claim with missing volume", + novolumes, + novolumes, + newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim bound to unbound volume. Check it's bound. + // Also check that Pending phase is set to Bound + "3-3 - bound claim with unbound volume", + newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending), + newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, annBoundByController), + newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim bound to volume with missing (or different) + // volume.Spec.ClaimRef.UID. Check that the claim is marked as lost. + "3-4 - bound claim with prebound volume", + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending), + newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim bound to bound volume. Check that the + // controller does not do anything. Also check that Pending phase is + // set to Bound + "3-5 - bound claim with bound volume", + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending), + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound), + newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), + newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), + testSyncClaim, + }, + { + // syncClaim with claim bound to a volume that is bound to different + // claim. Check that the claim is marked as lost. + // TODO: test that an event is emitted + "3-6 - bound claim with bound volume", + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending), + newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), + newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), + testSyncClaim, + }, } runSyncTests(t, tests) } From 0be1512f1c5fe67951277581c430efe5f1625d67 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:10 +0200 Subject: [PATCH 09/34] Add ClaimLost phase. 
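
A claim enters the new Lost phase when it was bound to a PersistentVolume that no
longer exists (or that is bound to a different claim); it is a terminal phase and
the data on the volume is considered lost. As a rough sketch only (not part of this
patch; the helper name is made up), a consumer watching claims can detect the phase
using nothing more than the constant added below:

    // isClaimLost reports whether a claim has reached the terminal Lost phase.
    func isClaimLost(claim *api.PersistentVolumeClaim) bool {
        return claim.Status.Phase == api.ClaimLost
    }
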
--- pkg/api/testing/fuzzer.go | 2 +- pkg/api/types.go | 4 ++++ pkg/api/v1/types.go | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 052ce3f4957..5f8dadc5175 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -339,7 +339,7 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) }, func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) { c.FuzzNoCustom(pvc) // fuzz self without calling this function again - types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending} + types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost} pvc.Status.Phase = types[c.Rand.Intn(len(types))] }, func(s *api.NamespaceSpec, c fuzz.Continue) { diff --git a/pkg/api/types.go b/pkg/api/types.go index 4b6ec153b89..ed568439260 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -411,6 +411,10 @@ const ( ClaimPending PersistentVolumeClaimPhase = "Pending" // used for PersistentVolumeClaims that are bound ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" ) // Represents a host path mapped into a pod. diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 00381d20e83..38faa6a9026 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -506,6 +506,10 @@ const ( ClaimPending PersistentVolumeClaimPhase = "Pending" // used for PersistentVolumeClaims that are bound ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" ) // Represents a host path mapped into a pod. From f4f252e81cc007900e6465d7334e1f9f3d4b61f4 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:11 +0200 Subject: [PATCH 10/34] Implement syncVolume. --- .../persistentvolume_controller.go | 215 +++++++++++++++++- .../persistentvolume_index.go | 4 + .../persistentvolume_sync_test.go | 91 ++++++++ 3 files changed, 303 insertions(+), 7 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index b10b3b05660..a2d5679882e 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -405,9 +405,8 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo } else { // This should never happen because someone had to remove // annBindCompleted annotation on the claim. 
- otherClaimName := fmt.Sprintf("%s/%s", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), otherClaimName) - return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, otherClaimName) + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) + return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) } } } @@ -454,7 +453,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, ctrl.getVolumeStatusForLogging(volume)) + glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // Claim is bound but volume has come unbound. // Or, a claim was bound and the controller has not received updated @@ -481,10 +480,9 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu // Claim is bound but volume has a different claimant. // Set the claim phase to 'Lost', which is a terminal // phase. - otherClaimName := fmt.Sprintf("%s/%s", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name) if claim.Status.Phase != api.ClaimLost { // Log the error only once, when we enter 'Lost' phase - glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q bound to another claim %q, this claim is lost!", claimToClaimKey(claim), claim.Spec.VolumeName, otherClaimName) + glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q bound to another claim %q, this claim is lost!", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) } // TODO: emit event and save reason if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { @@ -502,7 +500,164 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) - return nil + // [Unit test set 4] + if volume.Spec.ClaimRef == nil { + // Volume is unused + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) + if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } else /* pv.Spec.ClaimRef != nil */ { + // Volume is bound to a claim. + if volume.Spec.ClaimRef.UID == "" { + // The PV is reserved for a PVC; that PVC has not yet been + // bound to this PV; the PVC sync will handle it. 
+ glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Get the PVC by _name_ + var claim *api.PersistentVolumeClaim + claimName := claimrefToClaimKey(volume.Spec.ClaimRef) + obj, found, err := ctrl.claims.GetByKey(claimName) + if err != nil { + return err + } + if !found { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Fall through with claim = nil + } else { + var ok bool + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) + } + if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { + // The claim that the PV was pointing to was deleted, and another + // with the same name created. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Treat the volume as bound to a missing claim. + claim = nil + } + + if claim == nil { + // If we get into this block, the claim must have been deleted; + // NOTE: reclaimVolume may either release the PV back into the pool or + // recycle it or do nothing (retain) + + // Do not overwrite previous Failed state - let the user see that + // something went wrong, while we still re-try to reclaim the + // volume. + if volume.Status.Phase != api.VolumeReleased && volume.Status.Phase != api.VolumeFailed { + // Also, log this only once: + glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) + if volume, err = ctrl.updateVolumePhase(volume, api.VolumeReleased); err != nil { + // Nothing was saved; we will fall back into the same condition + // in the next call to this method + return err + } + } + + // HOWTO RELEASE A PV + if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRetain { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: policy is Retain, nothing to do", volume.Name) + return nil + } + // TODO: implement recycler + return nil + /* + else if pv.Spec.ReclaimPolicy == "Delete" { + plugin := findDeleterPluginForPV(pv) + if plugin != nil { + // maintain a map with the current deleter goroutines that are running + // if the key is already present in the map, return + // + // launch the goroutine that: + // 1. deletes the storage asset + // 2. deletes the PV API object + // 3. deletes itself from the map when it's done + } else { + // make an event calling out that no deleter was configured + // mark the PV as failed + // NB: external provisioners/deleters are currently not + // considered. 
+ } + } else if pv.Spec.ReclaimPolicy == "Recycle" { + plugin := findRecyclerPluginForPV(pv) + if plugin != nil { + // maintain a map of running scrubber-pod-monitoring + // goroutines, guarded by mutex + // + // launch a goroutine that: + // 0. verify the PV object still needs to be recycled or return + // 1. launches a scrubber pod; the pod's name is deterministically created based on PV uid + // 2. if the pod is rejected for dup, adopt the existing pod + // 2.5. if the pod is rejected for any other reason, retry later + // 3. else (the create succeeds), ok + // 4. wait for pod completion + // 5. marks the PV API object as available + // 5.5. clear ClaimRef.UID + // 5.6. if boundByController, clear ClaimRef & boundByController annotation + // 6. deletes itself from the map when it's done + } else { + // make an event calling out that no recycler was configured + // mark the PV as failed + } + }*/ + } else if claim.Spec.VolumeName == "" { + // This block collapses into a NOP; we're leaving this here for + // completeness. + if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // The binding is not completed; let PVC sync handle it + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) + } else { + // Dangling PV; try to re-establish the link in the PVC sync + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) + } + return nil + } else if claim.Spec.VolumeName == volume.Name { + // Volume is bound to a claim properly, update status if necessary + glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) + if _, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } else { + // Volume is bound to a claim, but the claim is bound elsewhere + if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // This is part of the normal operation of the controller; the + // controller tried to use this volume for a claim but the claim + // was fulfilled by another volume. We did this; fix it. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil + } else { + // The PV must have been created with this ptr; leave it alone. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + // This just updates the volume phase and clears + // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound + // to the claim. + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil + } + } + } } // Run starts all of this controller's control loops @@ -743,6 +898,52 @@ func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim return nil } +// unbindVolume rolls back previous binding of the volume. This may be necessary +// when two controllers bound two volumes to single claim - when we detect this, +// only one binding succeeds and the second one must be rolled back. +// This method updates both Spec and Status. +// It returns on first error, it's up to the caller to implement some retry +// mechanism. 
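+// Note on the two branches below: when the binding was created by the controller
+// (annBoundByController is set), the rollback clears the whole ClaimRef and removes
+// the annotation; when the volume was pre-bound by the user, only ClaimRef.UID is
+// cleared so the volume stays reserved for that claim by name. In both cases the
+// volume phase is then reset to Available.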
+func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error { + glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + + // Save the PV only when any modification is neccesary. + clone, err := conversion.NewCloner().DeepCopy(volume) + if err != nil { + return fmt.Errorf("Error cloning pv: %v", err) + } + volumeClone, ok := clone.(*api.PersistentVolume) + if !ok { + return fmt.Errorf("Unexpected volume cast error : %v", volumeClone) + } + + if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // The volume was bound by the controller. + volumeClone.Spec.ClaimRef = nil + delete(volumeClone.Annotations, annBoundByController) + if len(volumeClone.Annotations) == 0 { + // No annotations look better than empty annotation map (and it's easier + // to test). + volumeClone.Annotations = nil + } + } else { + // The volume was pre-bound by user. Clear only the binging UID. + volumeClone.Spec.ClaimRef.UID = "" + } + + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) + return err + } + glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) + + // Update the status + _, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable) + return err + +} + func hasAnnotation(obj api.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found diff --git a/pkg/controller/persistentvolume/persistentvolume_index.go b/pkg/controller/persistentvolume/persistentvolume_index.go index a86bf7c2b45..8da72389bdb 100644 --- a/pkg/controller/persistentvolume/persistentvolume_index.go +++ b/pkg/controller/persistentvolume/persistentvolume_index.go @@ -244,3 +244,7 @@ func (c byAccessModes) Len() int { func claimToClaimKey(claim *api.PersistentVolumeClaim) string { return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) } + +func claimrefToClaimKey(claimref *api.ObjectReference) string { + return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name) +} diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index 5008a346e93..60e44084463 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -283,6 +283,97 @@ func TestSync(t *testing.T) { newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), testSyncClaim, }, + // [Unit test set 4] All syncVolume tests. + { + // syncVolume with pending volume. Check it's marked as Available. + "4-1 - pending volume", + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending), + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable), + noclaims, + noclaims, + testSyncVolume, + }, + { + // syncVolume with prebound pending volume. Check it's marked as + // Available. + "4-2 - pending prebound volume", + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending), + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable), + noclaims, + noclaims, + testSyncVolume, + }, + { + // syncVolume with volume bound to missing claim. 
+ // Check the volume gets Released + "4-3 - bound volume with missing claim", + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound), + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased), + noclaims, + noclaims, + testSyncVolume, + }, + { + // syncVolume with volume bound to claim with different UID. + // Check the volume gets Released. + "4-4 - volume bound to claim with different UID", + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound), + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased), + newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), + newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), + testSyncVolume, + }, + { + // syncVolume with volume bound by controller to unbound claim. + // Check syncVolume does not do anything. + "4-5 - volume bound by controller to unbound claim", + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, annBoundByController), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, annBoundByController), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + testSyncVolume, + }, + { + // syncVolume with volume bound by user to unbound claim. + // Check syncVolume does not do anything. + "4-5 - volume bound by user to bound claim", + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + testSyncVolume, + }, + { + // syncVolume with volume bound to bound claim. + // Check that the volume is marked as Bound. + "4-6 - volume bound by to bound claim", + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable), + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound), + newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), + newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), + testSyncVolume, + }, + { + // syncVolume with volume bound by controller to claim bound to + // another volume. Check that the volume is rolled back. + "4-7 - volume bound by controller to claim bound somewhere else", + newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, annBoundByController), + newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable), + newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), + newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), + testSyncVolume, + }, + { + // syncVolume with volume bound by user to claim bound to + // another volume. Check that the volume is marked as Available + // and its UID is reset. 
+ "4-8 - volume bound by user to claim bound somewhere else", + newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound), + newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable), + newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), + newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), + testSyncVolume, + }, } runSyncTests(t, tests) } From 50b61ae16866d310e59facf1442763b5789aedef Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:12 +0200 Subject: [PATCH 11/34] Add "multi-sync" tests. These test will call syncVolume/syncClaim until they reach consistent state. --- .../persistentvolume_framework_test.go | 110 ++++++++++++++++++ .../persistentvolume_sync_test.go | 47 ++++++++ 2 files changed, 157 insertions(+) diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index 17727a31bc1..0d894fd6064 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -461,3 +461,113 @@ func runSyncTests(t *testing.T, tests []controllerTest) { evaluateTestResults(ctrl, reactor, test, t) } } + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. For all tests, the test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls in enforced to prevent endless loops. +func runMultisyncTests(t *testing.T, tests []controllerTest) { + for _, test := range tests { + glog.V(4).Infof("starting multisync test %q", test.name) + + // Initialize the controller + client := &fake.Clientset{} + ctrl := newPersistentVolumeController(client) + reactor := newVolumeReactor(client, ctrl, nil, nil) + for _, claim := range test.initialClaims { + ctrl.claims.Add(claim) + reactor.claims[claim.Name] = claim + } + for _, volume := range test.initialVolumes { + ctrl.volumes.store.Add(volume) + reactor.volumes[volume.Name] = volume + } + + // Run the tested function + err := test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q failed: %v", test.name, err) + } + + // Simulate any "changed" events and "periodical sync" until we reach a + // stable state. + firstSync := true + counter := 0 + for { + counter++ + glog.V(4).Infof("test %q: iteration %d", test.name, counter) + + if counter > 100 { + t.Errorf("Test %q failed: too many iterations", test.name) + break + } + + obj := reactor.popChange() + if obj == nil { + // Nothing was changed, should we exit? + if firstSync || reactor.changedSinceLastSync > 0 { + // There were some changes after the last "periodic sync". + // Simulate "periodic sync" of everything (until it produces + // no changes). 
+ firstSync = false + glog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name) + reactor.syncAll() + } else { + // Last sync did not produce any updates, the test reached + // stable state -> finish. + break + } + } + + // There were some changes, process them + switch obj.(type) { + case *api.PersistentVolumeClaim: + claim := obj.(*api.PersistentVolumeClaim) + // Simulate "claim updated" event + ctrl.claims.Update(claim) + err = ctrl.syncClaim(claim) + if err != nil { + if err == versionConflictError { + // Ignore version errors + glog.V(4).Infof("test intentionaly ignores version error.") + } else { + t.Errorf("Error calling syncClaim: %v", err) + // Finish the loop on the first error + break + } + } + // Process generated changes + continue + case *api.PersistentVolume: + volume := obj.(*api.PersistentVolume) + // Simulate "volume updated" event + ctrl.volumes.store.Update(volume) + err = ctrl.syncVolume(volume) + if err != nil { + if err == versionConflictError { + // Ignore version errors + glog.V(4).Infof("test intentionaly ignores version error.") + } else { + t.Errorf("Error calling syncVolume: %v", err) + // Finish the loop on the first error + break + } + } + // Process generated changes + continue + } + } + evaluateTestResults(ctrl, reactor, test, t) + glog.V(4).Infof("test %q finished after %d iterations", test.name, counter) + } +} diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index 60e44084463..23670153521 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -377,3 +377,50 @@ func TestSync(t *testing.T) { } runSyncTests(t, tests) } + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls in enforced to prevent endless loops. +func TestMultiSync(t *testing.T) { + tests := []controllerTest{ + // Test simple binding + { + // syncClaim binds to a matching unbound volume. + "10-1 - successful bind", + newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, annBoundByController), + newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + { + // Two controllers bound two PVs to single claim. Test one of them + // wins and the second rolls back. 
+ "10-2 - bind PV race", + []*api.PersistentVolume{ + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), + newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), + }, + []*api.PersistentVolume{ + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), + newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable), + }, + newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), + testSyncClaim, + }, + } + + runMultisyncTests(t, tests) +} From 61019b2401f156cee971c900463779919cb1a695 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:13 +0200 Subject: [PATCH 12/34] Process deleted PVs To speed up marking claims as "lost". --- .../persistentvolume_controller.go | 38 ++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index a2d5679882e..0ae3062d068 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -192,7 +192,43 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) // deleteVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { - // Intentionally left blank - we do not react on deleted volumes + if !ctrl.isFullySynced() { + return + } + + var volume *api.PersistentVolume + var ok bool + volume, ok = obj.(*api.PersistentVolume) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + volume, ok = unknown.Obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) + return + } + } + + if !ok || volume == nil || volume.Spec.ClaimRef == nil { + return + } + + if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { + if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { + // sync the claim when its volume is deleted. Explicitly syncing the + // claim here in response to volume deletion prevents the claim from + // waiting until the next sync period for its Lost status. + err := ctrl.syncClaim(claim) + if err != nil { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + } + } else { + glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) + } + } } // addClaim is callback from framework.Controller watching PersistentVolumeClaim From af295719f60c62c6e93a78e36c3002c2e709be18 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:14 +0200 Subject: [PATCH 13/34] Add events. 
--- .../persistentvolume_controller.go | 16 +++-- .../persistentvolume_framework_test.go | 64 +++++++++++++++-- .../persistentvolume_sync_test.go | 68 +++++++++---------- pkg/kubectl/describe.go | 6 ++ 4 files changed, 112 insertions(+), 42 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 0ae3062d068..acaea70b318 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -23,6 +23,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/conversion" @@ -89,6 +91,7 @@ type PersistentVolumeController struct { claimController *framework.Controller claimControllerStopCh chan struct{} kubeClient clientset.Interface + eventRecorder record.EventRecorder } // NewPersistentVolumeController creates a new PersistentVolumeController @@ -99,8 +102,13 @@ func NewPersistentVolumeController( recyclers []vol.VolumePlugin, cloud cloudprovider.Interface) *PersistentVolumeController { + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) + controller := &PersistentVolumeController{ - kubeClient: kubeClient, + kubeClient: kubeClient, + eventRecorder: recorder, } volumeSource := &cache.ListWatch{ @@ -461,8 +469,8 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu if claim.Status.Phase != api.ClaimLost { // Log the error only once, when we enter 'Lost' phase glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume reference lost!", claimToClaimKey(claim)) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. Data on the volume is lost!") } - // TODO: emit event and save reason if _, err := ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { return err } @@ -477,8 +485,8 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu if claim.Status.Phase != api.ClaimLost { // Log the error only once, when we enter 'Lost' phase glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q lost!", claimToClaimKey(claim), claim.Spec.VolumeName) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. 
Data on the volume is lost!") } - // TODO: emit event and save reason if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { return err } @@ -519,8 +527,8 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu if claim.Status.Phase != api.ClaimLost { // Log the error only once, when we enter 'Lost' phase glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q bound to another claim %q, this claim is lost!", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly") } - // TODO: emit event and save reason if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { return err } diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index 0d894fd6064..5dede9730dc 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "sync" "testing" @@ -32,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/conversion" @@ -63,7 +65,7 @@ import ( // - testSyncVolume - calls syncVolume on the first volume in initialVolumes. // - any custom function for specialized tests. // The test then contains list of volumes/claims that are expected at the end -// of the test. +// of the test and list of generated events. type controllerTest struct { // Name of the test, for logging name string @@ -75,6 +77,9 @@ type controllerTest struct { initialClaims []*api.PersistentVolumeClaim // Expected content of controller claim cache at the end of the test. expectedClaims []*api.PersistentVolumeClaim + // Expected events - any event with prefix will pass, we don't check full + // event message. + expectedEvents []string // Function to call as the test. test testCall } @@ -86,6 +91,7 @@ const testNamespace = "default" var versionConflictError = errors.New("VersionError") var novolumes []*api.PersistentVolume var noclaims []*api.PersistentVolumeClaim +var noevents = []string{} // volumeReactor is a core.Reactor that simulates etcd and API server. It // stores: @@ -238,6 +244,51 @@ func (r *volumeReactor) checkClaims(t *testing.T, expectedClaims []*api.Persiste return nil } +// checkEvents compares all expectedEvents with events generated during the test +// and reports differences. 
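+// Events are drained from the controller's FakeRecorder without blocking; each
+// expected entry only needs to be a prefix of the recorded message (so tests can
+// assert "Warning ClaimLost" without repeating the full text), and any recorded
+// event beyond the expected list is reported as unexpected.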
+func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error { + var err error + + // Read recorded events + fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder) + gotEvents := []string{} + finished := false + for !finished { + select { + case event, ok := <-fakeRecorder.Events: + if ok { + glog.V(5).Infof("event recorder got event %s", event) + gotEvents = append(gotEvents, event) + } else { + glog.V(5).Infof("event recorder finished") + finished = true + } + default: + glog.V(5).Infof("event recorder finished") + finished = true + } + } + + // Evaluate the events + for i, expected := range expectedEvents { + if len(gotEvents) <= i { + t.Errorf("Event %q not emitted", expected) + err = fmt.Errorf("Events do not match") + continue + } + received := gotEvents[i] + if !strings.HasPrefix(received, expected) { + t.Errorf("Unexpected event received, expected %q, got %q", expected, received) + err = fmt.Errorf("Events do not match") + } + } + for i := len(expectedEvents); i < len(gotEvents); i++ { + t.Errorf("Unexpected event received: %q", gotEvents[i]) + err = fmt.Errorf("Events do not match") + } + return err +} + // popChange returns one recorded updated object, either *api.PersistentVolume // or *api.PersistentVolumeClaim. Returns nil when there are no changes. func (r *volumeReactor) popChange() interface{} { @@ -303,9 +354,10 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVolumeController { ctrl := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(cache.MetaNamespaceKeyFunc), - kubeClient: kubeClient, + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(cache.MetaNamespaceKeyFunc), + kubeClient: kubeClient, + eventRecorder: record.NewFakeRecorder(1000), } return ctrl } @@ -427,6 +479,10 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto if err := reactor.checkVolumes(t, test.expectedVolumes); err != nil { t.Errorf("Test %q: %v", test.name, err) } + + if err := checkEvents(t, test.expectedEvents, ctrl); err != nil { + t.Errorf("Test %q: %v", test.name, err) + } } // Test single call to syncClaim and syncVolume methods. diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index 23670153521..91529c02fa3 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -39,7 +39,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, annBoundByController), newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim does not do anything when there is no matching volume. 
@@ -48,7 +48,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim resets claim.Status to Pending when there is no @@ -58,7 +58,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim binds claims to the smallest matching volume @@ -73,7 +73,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim binds a claim only to volume that points to it (by @@ -89,7 +89,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim binds a claim only to volume that points to it (by @@ -105,7 +105,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim does not bind claim to a volume prebound to a claim with @@ -115,7 +115,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -125,7 +125,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, annBoundByController), newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -135,7 +135,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, annBoundByController), newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -145,7 +145,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, // [Unit test set 2] User asked for a specific PV. 
// Test the binding when pv.ClaimRef is already set by controller or @@ -157,7 +157,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that does not exist. @@ -167,7 +167,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound), newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that exists and is @@ -177,7 +177,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, annBoundByController), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // claim with claim pre-bound to a PV that is pre-bound to the claim @@ -187,7 +187,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that is pre-bound to the @@ -198,7 +198,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that is bound to different @@ -208,7 +208,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim bound by controller to a PV that is bound to @@ -218,7 +218,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), - testSyncClaimError, + noevents, testSyncClaimError, }, // [Unit test set 3] Syncing bound claim { @@ -229,7 +229,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted), - testSyncClaim, + []string{"Warning ClaimLost"}, testSyncClaim, }, { // syncClaim with claim bound to non-exising volume. 
Check it's @@ -239,7 +239,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted), - testSyncClaim, + []string{"Warning ClaimLost"}, testSyncClaim, }, { // syncClaim with claim bound to unbound volume. Check it's bound. @@ -249,7 +249,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, annBoundByController), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim bound to volume with missing (or different) @@ -259,7 +259,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), - testSyncClaim, + []string{"Warning ClaimMisbound"}, testSyncClaim, }, { // syncClaim with claim bound to bound volume. Check that the @@ -270,7 +270,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // syncClaim with claim bound to a volume that is bound to different @@ -281,7 +281,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), - testSyncClaim, + []string{"Warning ClaimMisbound"}, testSyncClaim, }, // [Unit test set 4] All syncVolume tests. { @@ -291,7 +291,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable), noclaims, noclaims, - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with prebound pending volume. Check it's marked as @@ -301,7 +301,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable), noclaims, noclaims, - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound to missing claim. @@ -311,7 +311,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased), noclaims, noclaims, - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound to claim with different UID. @@ -321,7 +321,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound by controller to unbound claim. 
@@ -331,7 +331,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, annBoundByController), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound by user to unbound claim. @@ -341,7 +341,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound to bound claim. @@ -351,7 +351,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound by controller to claim bound to @@ -361,7 +361,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), - testSyncVolume, + noevents, testSyncVolume, }, { // syncVolume with volume bound by user to claim bound to @@ -372,7 +372,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), - testSyncVolume, + noevents, testSyncVolume, }, } runSyncTests(t, tests) @@ -402,7 +402,7 @@ func TestMultiSync(t *testing.T) { newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, annBoundByController), newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, { // Two controllers bound two PVs to single claim. 
Test one of them @@ -418,7 +418,7 @@ func TestMultiSync(t *testing.T) { }, newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), - testSyncClaim, + noevents, testSyncClaim, }, } diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 97bc876ed39..e27c020500f 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -769,6 +769,8 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri capacity = storage.String() } + events, _ := d.Events(namespace).Search(pvc) + return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) @@ -777,6 +779,10 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri printLabelsMultiline(out, "Labels", pvc.Labels) fmt.Fprintf(out, "Capacity:\t%s\n", capacity) fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes) + if events != nil { + DescribeEvents(events, out) + } + return nil }) } From 7b73384fda92fa5c8126e29f948b4b06693074f1 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:15 +0200 Subject: [PATCH 14/34] Add controller method tests. --- .../persistentvolume_controller_test.go | 162 ++++++++++++++++++ .../persistentvolume_framework_test.go | 22 +++ 2 files changed, 184 insertions(+) create mode 100644 pkg/controller/persistentvolume/persistentvolume_controller_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_controller_test.go new file mode 100644 index 00000000000..8deeaeba4a3 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_controller_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "testing" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/conversion" +) + +// Test the real controller methods (add/update/delete claim/volume) with +// a fake API server. +// There is no controller API to 'initiate syncAll now', therefore these tests +// can't reliably simulate periodic sync of volumes/claims - it would be +// either very timing-sensitive or slow to wait for real periodic sync. +func TestControllerSync(t *testing.T) { + tests := []controllerTest{ + // [Unit test set 5] - controller tests. + // We test the controller as if + // it was connected to real API server, i.e. we call add/update/delete + // Claim/Volume methods. Also, all changes to volumes and claims are + // sent to add/update/delete Claim/Volume as real controller would do. + { + // addVolume gets a new volume. 
Check it's marked as Available and + // that it's not bound to any claim - we bind volumes on periodic + // syncClaim, not on addVolume. + "5-1 - addVolume", + novolumes, /* added in testCall below */ + newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable), + newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), + noevents, + // Custom test function that generates an add event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending) + reactor.volumes[volume.Name] = volume + reactor.volumeSource.Add(volume) + return nil + }, + }, + { + // addClaim gets a new claim. Check it's bound to a volume. + "5-2 - complete bind", + newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable), + newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, annBoundByController), + noclaims, /* added in testAddClaim5_2 */ + newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, + // Custom test function that generates an add event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending) + reactor.claims[claim.Name] = claim + reactor.claimSource.Add(claim) + return nil + }, + }, + { + // deleteClaim with a bound claim makes bound volume released. + "5-3 - delete claim", + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, annBoundByController), + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, annBoundByController), + newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted), + noclaims, + noevents, + // Custom test function that generates a delete event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + obj := ctrl.claims.List()[0] + claim := obj.(*api.PersistentVolumeClaim) + // Remove the claim from list of resulting claims. + delete(reactor.claims, claim.Name) + // Poke the controller with deletion event. Cloned claim is + // needed to prevent races (and we would get a clone from etcd + // too). + clone, _ := conversion.NewCloner().DeepCopy(claim) + claimClone := clone.(*api.PersistentVolumeClaim) + reactor.claimSource.Delete(claimClone) + return nil + }, + }, + { + // deleteVolume with a bound volume. Check the claim is Lost. + "5-4 - delete volume", + newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound), + novolumes, + newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted), + []string{"Warning ClaimLost"}, + // Custom test function that generates a delete event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + obj := ctrl.volumes.store.List()[0] + volume := obj.(*api.PersistentVolume) + // Remove the volume from list of resulting volumes. + delete(reactor.volumes, volume.Name) + // Poke the controller with deletion event. Cloned volume is + // needed to prevent races (and we would get a clone from etcd + // too). 
+ clone, _ := conversion.NewCloner().DeepCopy(volume) + volumeClone := clone.(*api.PersistentVolume) + reactor.volumeSource.Delete(volumeClone) + return nil + }, + }, + } + + for _, test := range tests { + glog.V(4).Infof("starting test %q", test.name) + + // Initialize the controller + client := &fake.Clientset{} + volumeSource := framework.NewFakeControllerSource() + claimSource := framework.NewFakeControllerSource() + ctrl := newPersistentVolumeController(client) + ctrl.initializeController(time.Minute, volumeSource, claimSource) + reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource) + for _, claim := range test.initialClaims { + claimSource.Add(claim) + reactor.claims[claim.Name] = claim + } + for _, volume := range test.initialVolumes { + volumeSource.Add(volume) + reactor.volumes[volume.Name] = volume + } + + // Start the controller + defer ctrl.Stop() + go ctrl.Run() + + // Wait for the controller to pass initial sync. + for !ctrl.isFullySynced() { + time.Sleep(10 * time.Millisecond) + } + + // Call the tested function + err := test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q initial test call failed: %v", test.name, err) + } + + reactor.waitTest() + + evaluateTestResults(ctrl, reactor, test, t) + } +} diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index 5dede9730dc..e2d4b8eb2aa 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -24,6 +24,7 @@ import ( "strings" "sync" "testing" + "time" "github.com/golang/glog" @@ -144,6 +145,8 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, obj, versionConflictError } volume.ResourceVersion = strconv.Itoa(storedVer + 1) + } else { + return true, nil, fmt.Errorf("Cannot update volume %s: volume not found", volume.Name) } // Store the updated object to appropriate places. @@ -169,6 +172,8 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, obj, versionConflictError } claim.ResourceVersion = strconv.Itoa(storedVer + 1) + } else { + return true, nil, fmt.Errorf("Cannot update claim %s: claim not found", claim.Name) } // Store the updated object to appropriate places. @@ -340,6 +345,23 @@ func (r *volumeReactor) getChangeCount() int { return r.changedSinceLastSync } +// waitTest waits until all tests, controllers and other goroutines do their +// job and no new actions are registered for 10 milliseconds. +func (r *volumeReactor) waitTest() { + // Check every 10ms if the controller does something and stop if it's + // idle. + oldChanges := -1 + for { + time.Sleep(10 * time.Millisecond) + changes := r.getChangeCount() + if changes == oldChanges { + // No changes for last 10ms -> controller must be idle. + break + } + oldChanges = changes + } +} + func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource, claimSource *framework.FakeControllerSource) *volumeReactor { reactor := &volumeReactor{ volumes: make(map[string]*api.PersistentVolume), From a17f0d594991765596c598d1e9684ffcc5e68fc9 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:16 +0200 Subject: [PATCH 15/34] Move release logic to standalone function. 
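In effect, this commit lifts the reclaim decision out of syncVolume into a standalone helper. At this point in the series only the Retain branch is live; Recycle and Delete survive only as the commented-out HOWTO notes visible in the diff below. Simplified, the extracted function amounts to:

    // Simplified sketch of the helper introduced by this commit; the real diff
    // below also carries the commented-out Recycle/Delete HOWTO notes.
    func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolume) error {
        if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRetain {
            glog.V(4).Infof("synchronizing PersistentVolume[%s]: policy is Retain, nothing to do", volume.Name)
            return nil
        }
        // Recycle and Delete are implemented by later commits in this series.
        return nil
    }
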
--- .../persistentvolume_controller.go | 96 ++++++++++--------- 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index acaea70b318..3259fbad1af 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -612,52 +612,12 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) } } - // HOWTO RELEASE A PV - if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRetain { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: policy is Retain, nothing to do", volume.Name) - return nil + if err = ctrl.reclaimVolume(volume); err != nil { + // Release failed, we will fall back into the same condition + // in the next call to this method + return err } - // TODO: implement recycler return nil - /* - else if pv.Spec.ReclaimPolicy == "Delete" { - plugin := findDeleterPluginForPV(pv) - if plugin != nil { - // maintain a map with the current deleter goroutines that are running - // if the key is already present in the map, return - // - // launch the goroutine that: - // 1. deletes the storage asset - // 2. deletes the PV API object - // 3. deletes itself from the map when it's done - } else { - // make an event calling out that no deleter was configured - // mark the PV as failed - // NB: external provisioners/deleters are currently not - // considered. - } - } else if pv.Spec.ReclaimPolicy == "Recycle" { - plugin := findRecyclerPluginForPV(pv) - if plugin != nil { - // maintain a map of running scrubber-pod-monitoring - // goroutines, guarded by mutex - // - // launch a goroutine that: - // 0. verify the PV object still needs to be recycled or return - // 1. launches a scrubber pod; the pod's name is deterministically created based on PV uid - // 2. if the pod is rejected for dup, adopt the existing pod - // 2.5. if the pod is rejected for any other reason, retry later - // 3. else (the create succeeds), ok - // 4. wait for pod completion - // 5. marks the PV API object as available - // 5.5. clear ClaimRef.UID - // 5.6. if boundByController, clear ClaimRef & boundByController annotation - // 6. deletes itself from the map when it's done - } else { - // make an event calling out that no recycler was configured - // mark the PV as failed - } - }*/ } else if claim.Spec.VolumeName == "" { // This block collapses into a NOP; we're leaving this here for // completeness. @@ -988,6 +948,54 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolum } +func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolume) error { + if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRetain { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: policy is Retain, nothing to do", volume.Name) + return nil + } + /* else if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { + plugin := findRecyclerPluginForPV(pv) + if plugin != nil { + // HOWTO RELEASE A PV + // maintain a map of running scrubber-pod-monitoring + // goroutines, guarded by mutex + // + // launch a goroutine that: + // 0. verify the PV object still needs to be recycled or return + // 1. launches a scrubber pod; the pod's name is deterministically created based on PV uid + // 2. if the pod is rejected for dup, adopt the existing pod + // 2.5. 
if the pod is rejected for any other reason, retry later + // 3. else (the create succeeds), ok + // 4. wait for pod completion + // 5. marks the PV API object as available + // 5.5. clear ClaimRef.UID + // 5.6. if boundByController, clear ClaimRef & boundByController annotation + // 6. deletes itself from the map when it's done + } else { + // make an event calling out that no recycler was configured + // mark the PV as failed + + } + } else if pv.Spec.ReclaimPolicy == "Delete" { + plugin := findDeleterPluginForPV(pv) + if plugin != nil { + // maintain a map with the current deleter goroutines that are running + // if the key is already present in the map, return + // + // launch the goroutine that: + // 1. deletes the storage asset + // 2. deletes the PV API object + // 3. deletes itself from the map when it's done + } else { + // make an event calling out that no deleter was configured + // mark the PV as failed + // NB: external provisioners/deleters are currently not + // considered. + } + */ + return nil +} + func hasAnnotation(obj api.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found From 4e47f69cba5d57decdde895179959cd68b55bad8 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:17 +0200 Subject: [PATCH 16/34] recycler: Implement volume host interfaces. We need the controller to implement volume.VolumeHost interface to be able to call recycle plugins. --- .../persistentvolume_controller.go | 4 + .../persistentvolume/persistentvolume_host.go | 73 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 pkg/controller/persistentvolume/persistentvolume_host.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 3259fbad1af..988cce6b27a 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -92,6 +92,7 @@ type PersistentVolumeController struct { claimControllerStopCh chan struct{} kubeClient clientset.Interface eventRecorder record.EventRecorder + cloud cloudprovider.Interface } // NewPersistentVolumeController creates a new PersistentVolumeController @@ -109,6 +110,7 @@ func NewPersistentVolumeController( controller := &PersistentVolumeController{ kubeClient: kubeClient, eventRecorder: recorder, + cloud: cloud, } volumeSource := &cache.ListWatch{ @@ -996,6 +998,8 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu return nil } +// Stateless functions + func hasAnnotation(obj api.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found diff --git a/pkg/controller/persistentvolume/persistentvolume_host.go b/pkg/controller/persistentvolume/persistentvolume_host.go new file mode 100644 index 00000000000..f38ad0da4ee --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_host.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/io" + "k8s.io/kubernetes/pkg/util/mount" + vol "k8s.io/kubernetes/pkg/volume" +) + +// VolumeHost interface implementation for PersistentVolumeController. + +var _ vol.VolumeHost = &PersistentVolumeController{} + +func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string { + return "" +} + +func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { + return "" +} + +func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string { + return "" +} + +func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface { + return ctrl.kubeClient +} + +func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) { + return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented") +} + +func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) { + return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented") +} + +func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface { + return ctrl.cloud +} + +func (ctrl *PersistentVolumeController) GetMounter() mount.Interface { + return nil +} + +func (ctrl *PersistentVolumeController) GetWriter() io.Writer { + return nil +} + +func (ctrl *PersistentVolumeController) GetHostName() string { + return "" +} From cf68370371b9e0836cd1c17186188be5acf39fd3 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:18 +0200 Subject: [PATCH 17/34] recycler: Maintain a list of long-running operations. We need to keep list of running recyclers, deleters and provisioners in memory in order not to start a new recycling/deleting/provisioning twice for the same volume/claim. This will be eventually replaced by GoRoutineMap from PR #24838. --- .../persistentvolume_controller.go | 67 ++++++++++++++++++- 1 file changed, 64 insertions(+), 3 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 988cce6b27a..3cf1c4627d6 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -18,6 +18,7 @@ package persistentvolume import ( "fmt" + "sync" "time" "k8s.io/kubernetes/pkg/api" @@ -93,6 +94,19 @@ type PersistentVolumeController struct { kubeClient clientset.Interface eventRecorder record.EventRecorder cloud cloudprovider.Interface + + // PersistentVolumeController keeps track of long running operations and + // makes sure it won't start the same operation twice in parallel. + // Each operation is identified by unique operationName. + // Simple keymutex.KeyMutex is not enough, we need to know what operations + // are in progress (so we don't schedule a new one) and keymutex.KeyMutex + // does not provide such functionality. 
+ + // runningOperationsMapLock guards access to runningOperations map + runningOperationsMapLock sync.Mutex + // runningOperations is map of running operations. The value does not + // matter, presence of a key is enough to consider an operation running. + runningOperations map[string]bool } // NewPersistentVolumeController creates a new PersistentVolumeController @@ -108,9 +122,10 @@ func NewPersistentVolumeController( recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) controller := &PersistentVolumeController{ - kubeClient: kubeClient, - eventRecorder: recorder, - cloud: cloud, + kubeClient: kubeClient, + eventRecorder: recorder, + runningOperations: make(map[string]bool), + cloud: cloud, } volumeSource := &cache.ListWatch{ @@ -998,6 +1013,52 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu return nil } +// scheduleOperation starts given asynchronous operation on given volume. It +// makes sure the operation is already not running. +func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) { + glog.V(4).Infof("scheduleOperation[%s]", operationName) + + isRunning := func() bool { + // In anonymous func() to get the locking right. + ctrl.runningOperationsMapLock.Lock() + defer ctrl.runningOperationsMapLock.Unlock() + + if ctrl.isOperationRunning(operationName) { + glog.V(4).Infof("operation %q is already running, skipping", operationName) + return true + } + ctrl.startRunningOperation(operationName) + return false + }() + + if isRunning { + return + } + + // Run the operation in separate goroutine + go func() { + glog.V(4).Infof("scheduleOperation[%s]: running the operation", operationName) + operation(arg) + + ctrl.runningOperationsMapLock.Lock() + defer ctrl.runningOperationsMapLock.Unlock() + ctrl.finishRunningOperation(operationName) + }() +} + +func (ctrl *PersistentVolumeController) isOperationRunning(operationName string) bool { + _, found := ctrl.runningOperations[operationName] + return found +} + +func (ctrl *PersistentVolumeController) finishRunningOperation(operationName string) { + delete(ctrl.runningOperations, operationName) +} + +func (ctrl *PersistentVolumeController) startRunningOperation(operationName string) { + ctrl.runningOperations[operationName] = true +} + // Stateless functions func hasAnnotation(obj api.ObjectMeta, ann string) bool { From 56cae2dc20307819f000a6d803bc35ad5de982ba Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:19 +0200 Subject: [PATCH 18/34] unit test framework: Wait for all running operations to finish during all tests. 
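The guard introduced in the previous commit is what the later recycler and deleter code relies on: callers schedule work under a stable operation name, and a second schedule for the same name is silently skipped while the first goroutine is still running. A rough usage illustration (the operation name matches the reclaim code added later in this series):

    // First call starts a goroutine running recycleVolumeOperation(volume);
    // the second call finds the name in runningOperations and returns immediately.
    ctrl.scheduleOperation("recycle-"+string(volume.UID), ctrl.recycleVolumeOperation, volume)
    ctrl.scheduleOperation("recycle-"+string(volume.UID), ctrl.recycleVolumeOperation, volume)

This is also why the test framework change below waits not only for the reactor to go quiet but also for the running-operations map to drain before evaluating results.
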
--- .../persistentvolume/persistentvolume_framework_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index e2d4b8eb2aa..2f2c6a0d09a 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -345,6 +345,12 @@ func (r *volumeReactor) getChangeCount() int { return r.changedSinceLastSync } +func (r *volumeReactor) getOperationCount() int { + r.ctrl.runningOperationsMapLock.Lock() + defer r.ctrl.runningOperationsMapLock.Unlock() + return len(r.ctrl.runningOperations) +} + // waitTest waits until all tests, controllers and other goroutines do their // job and no new actions are registered for 10 milliseconds. func (r *volumeReactor) waitTest() { @@ -354,7 +360,7 @@ func (r *volumeReactor) waitTest() { for { time.Sleep(10 * time.Millisecond) changes := r.getChangeCount() - if changes == oldChanges { + if changes == oldChanges && r.getOperationCount() == 0 { // No changes for last 10ms -> controller must be idle. break } From 1feb346830e88793a6f72e1887234a76c76b52cd Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:20 +0200 Subject: [PATCH 19/34] recycler: implement recycler Also update the old unit test to pass. New unit tests will be added in subsequent commit. --- .../persistentvolume_controller.go | 240 +++++++++++++++--- 1 file changed, 198 insertions(+), 42 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 3cf1c4627d6..366605044d2 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -94,6 +94,7 @@ type PersistentVolumeController struct { kubeClient clientset.Interface eventRecorder record.EventRecorder cloud cloudprovider.Interface + recyclePluginMgr vol.VolumePluginMgr // PersistentVolumeController keeps track of long running operations and // makes sure it won't start the same operation twice in parallel. @@ -127,6 +128,7 @@ func NewPersistentVolumeController( runningOperations: make(map[string]bool), cloud: cloud, } + controller.recyclePluginMgr.InitPlugins(recyclers, controller) volumeSource := &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { @@ -965,54 +967,208 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolum } +// reclaimVolume implements volume.Spec.PersistentVolumeReclaimPolicy and +// starts appropriate reclaim action. func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolume) error { - if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRetain { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: policy is Retain, nothing to do", volume.Name) - return nil - } - /* else if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { - plugin := findRecyclerPluginForPV(pv) - if plugin != nil { - // HOWTO RELEASE A PV - // maintain a map of running scrubber-pod-monitoring - // goroutines, guarded by mutex - // - // launch a goroutine that: - // 0. verify the PV object still needs to be recycled or return - // 1. launches a scrubber pod; the pod's name is deterministically created based on PV uid - // 2. 
if the pod is rejected for dup, adopt the existing pod - // 2.5. if the pod is rejected for any other reason, retry later - // 3. else (the create succeeds), ok - // 4. wait for pod completion - // 5. marks the PV API object as available - // 5.5. clear ClaimRef.UID - // 5.6. if boundByController, clear ClaimRef & boundByController annotation - // 6. deletes itself from the map when it's done - } else { - // make an event calling out that no recycler was configured - // mark the PV as failed + switch volume.Spec.PersistentVolumeReclaimPolicy { + case api.PersistentVolumeReclaimRetain: + glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) + case api.PersistentVolumeReclaimRecycle: + glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) + ctrl.scheduleOperation("recycle-"+string(volume.UID), ctrl.recycleVolumeOperation, volume) + + case api.PersistentVolumeReclaimDelete: + glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) + ctrl.scheduleOperation("delete-"+string(volume.UID), ctrl.deleteVolumeOperation, volume) + + default: + // Unknown PersistentVolumeReclaimPolicy + if volume.Status.Phase != api.VolumeFailed { + // Log the error only once, when we enter 'Failed' phase + glog.V(3).Infof("reclaimVolume[%s]: volume has unrecognized PersistentVolumeReclaimPolicy %q, marking as Failed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) + ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy") } - } else if pv.Spec.ReclaimPolicy == "Delete" { - plugin := findDeleterPluginForPV(pv) - if plugin != nil { - // maintain a map with the current deleter goroutines that are running - // if the key is already present in the map, return - // - // launch the goroutine that: - // 1. deletes the storage asset - // 2. deletes the PV API object - // 3. deletes itself from the map when it's done - } else { - // make an event calling out that no deleter was configured - // mark the PV as failed - // NB: external provisioners/deleters are currently not - // considered. - } - */ + if _, err := ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + return err + } + } return nil } +// doRerecycleVolumeOperationcycleVolume recycles a volume. This method is +// running in standalone goroutine and already has all necessary locks. +func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) { + volume, ok := arg.(*api.PersistentVolume) + if !ok { + glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %+v", arg) + return + } + + glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) + + // This method may have been waiting for a volume lock for some time. + // Previous recycleVolumeOperation might just have saved an updated version, + // so read current volume state now. + newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name) + if err != nil { + glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err) + return + } + needsReclaim, err := ctrl.isVolumeReleased(newVolume) + if err != nil { + glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + return + } + if !needsReclaim { + glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) + return + } + + // Use the newest volume copy, this will save us from version conflicts on + // saving. + volume = newVolume + + // Find a plugin. 
+ spec := vol.NewSpecFromPersistentVolume(volume, false) + plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec) + if err != nil { + // No recycler found. Emit an event and mark the volume Failed. + if volume.Status.Phase != api.VolumeFailed { + // Log the error only once, when we enter 'Failed' phase + glog.V(2).Infof("failed to find recycle plugin for volume %q: %v", volume.Name, err) + ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!") + } + + if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + // Save failed, retry on the next deletion attempt + return + } + // Despite the volume being Failed, the controller will retry recycling + // the volume in every syncVolume() call. + return + } + + // Plugin found + recycler, err := plugin.NewRecycler(spec) + if err != nil { + // Cannot create recycler + if volume.Status.Phase != api.VolumeFailed { + // Log the error only once, when we enter 'Failed' phase + glog.V(2).Infof("failed to create recycler for volume %q: %v", volume.Name, err) + strerr := fmt.Sprintf("Failed to create recycler: %v", err) + ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", strerr) + } + + if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + // Save failed, retry on the next deletion attempt + return + } + // Despite the volume being Failed, the controller will retry recycling + // the volume in every syncVolume() call. + return + } + + if err = recycler.Recycle(); err != nil { + // Recycler failed + if volume.Status.Phase != api.VolumeFailed { + // Log the error only once, when we enter 'Failed' phase + glog.V(2).Infof("recycler for volume %q failed: %v", volume.Name, err) + strerr := fmt.Sprintf("Recycler failed: %s", err) + ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", strerr) + } + + if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + // Save failed, retry on the next deletion attempt + return + } + // Despite the volume being Failed, the controller will retry recycling + // the volume in every syncVolume() call. + return + } + + glog.V(2).Infof("volume %q recycled", volume.Name) + // Make the volume available again + if err = ctrl.unbindVolume(volume); err != nil { + // Oops, could not save the volume and therefore the controller will + // recycle the volume again on next update. We _could_ maintain a cache + // of "recently recycled volumes" and avoid unnecessary recycling, this + // is left out as future optimization. + glog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) + return + } + return +} + +// deleteVolumeOperation deletes a volume. This method is running in standalone +// goroutine and already has all necessary locks. 
+func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) { + volume := arg.(*api.PersistentVolume) + glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) + /*else if pv.Spec.ReclaimPolicy == "Delete" { + plugin := findDeleterPluginForPV(pv) + if plugin != nil { + // maintain a map with the current deleter goroutines that are running + // if the key is already present in the map, return + // + // launch the goroutine that: + // 1. deletes the storage asset + // 2. deletes the PV API object + // 3. deletes itself from the map when it's done + } else { + // make an event calling out that no deleter was configured + // mark the PV as failed + // NB: external provisioners/deleters are currently not + // considered. + } + */ +} + +// isVolumeReleased returns true if given volume is released and can be recycled +// or deleted, based on its retain policy. I.e. the volume is bound to a claim +// and the claim does not exist or exists and is bound to different volume. +func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentVolume) (bool, error) { + // A volume needs reclaim if it has ClaimRef and appropriate claim does not + // exist. + if volume.Spec.ClaimRef == nil { + glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) + return false, nil + } + if volume.Spec.ClaimRef.UID == "" { + // This is a volume bound by user and the controller has not finished + // binding to the real claim yet. + glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) + return false, nil + } + + var claim *api.PersistentVolumeClaim + claimName := claimrefToClaimKey(volume.Spec.ClaimRef) + obj, found, err := ctrl.claims.GetByKey(claimName) + if err != nil { + return false, err + } + if !found { + // Fall through with claim = nil + } else { + var ok bool + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %+v", obj) + } + } + if claim != nil && claim.UID == volume.Spec.ClaimRef.UID { + // the claim still exists and has the right UID + glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) + return false, nil + } + + glog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) + return true, nil +} + // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) { From a08d826ca5e74f885cf50a83ab3c7eabcacdcc96 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:21 +0200 Subject: [PATCH 20/34] Make a separate functions to emit events and change status. These two seem to be always used together. --- .../persistentvolume_controller.go | 104 ++++++++++-------- 1 file changed, 57 insertions(+), 47 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 366605044d2..dcf2ac6a42c 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -485,12 +485,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu // [Unit test set 3] if claim.Spec.VolumeName == "" { // Claim was bound before but not any more. 
- if claim.Status.Phase != api.ClaimLost { - // Log the error only once, when we enter 'Lost' phase - glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume reference lost!", claimToClaimKey(claim)) - ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. Data on the volume is lost!") - } - if _, err := ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + if _, err := ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. Data on the volume is lost!"); err != nil { return err } return nil @@ -501,12 +496,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu } if !found { // Claim is bound to a non-existing volume. - if claim.Status.Phase != api.ClaimLost { - // Log the error only once, when we enter 'Lost' phase - glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q lost!", claimToClaimKey(claim), claim.Spec.VolumeName) - ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. Data on the volume is lost!") - } - if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + if _, err = ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. Data on the volume is lost!"); err != nil { return err } return nil @@ -543,12 +533,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu // Claim is bound but volume has a different claimant. // Set the claim phase to 'Lost', which is a terminal // phase. - if claim.Status.Phase != api.ClaimLost { - // Log the error only once, when we enter 'Lost' phase - glog.V(3).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q bound to another claim %q, this claim is lost!", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) - ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly") - } - if _, err = ctrl.updateClaimPhase(claim, api.ClaimLost); err != nil { + if _, err = ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly"); err != nil { return err } return nil @@ -741,6 +726,30 @@ func (ctrl *PersistentVolumeController) updateClaimPhase(claim *api.PersistentVo return newClaim, nil } +// updateClaimPhaseWithEvent saves new claim phase to API server and emits given +// event on the claim. It saves the phase and emits the event only when the +// phase has actually changed from the version saved in API server. +func (ctrl *PersistentVolumeController) updateClaimPhaseWithEvent(claim *api.PersistentVolumeClaim, phase api.PersistentVolumeClaimPhase, eventtype, reason, message string) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating updateClaimPhaseWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) + if claim.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating updateClaimPhaseWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) + return claim, nil + } + + newClaim, err := ctrl.updateClaimPhase(claim, phase) + if err != nil { + return nil, err + } + + // Emit the event only when the status change happens, not everytime + // syncClaim is called. 
+ glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) + ctrl.eventRecorder.Event(newClaim, eventtype, reason, message) + + return newClaim, nil +} + // updateVolumePhase saves new volume phase to API server. func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.PersistentVolume, phase api.PersistentVolumePhase) (*api.PersistentVolume, error) { glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) @@ -769,6 +778,30 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.Persistent return newVol, err } +// updateVolumePhaseWithEvent saves new volume phase to API server and emits +// given event on the volume. It saves the phase and emits the event only when +// the phase has actually changed from the version saved in API server. +func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *api.PersistentVolume, phase api.PersistentVolumePhase, eventtype, reason, message string) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) + if volume.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) + return volume, nil + } + + newVol, err := ctrl.updateVolumePhase(volume, phase) + if err != nil { + return nil, err + } + + // Emit the event only when the status change happens, not everytime + // syncClaim is called. + glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) + ctrl.eventRecorder.Event(newVol, eventtype, reason, message) + + return newVol, nil +} + // bindVolumeToClaim modifes given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) { @@ -984,12 +1017,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu default: // Unknown PersistentVolumeReclaimPolicy - if volume.Status.Phase != api.VolumeFailed { - // Log the error only once, when we enter 'Failed' phase - glog.V(3).Infof("reclaimVolume[%s]: volume has unrecognized PersistentVolumeReclaimPolicy %q, marking as Failed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) - ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy") - } - if _, err := ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + if _, err := ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy"); err != nil { return err } } @@ -1034,13 +1062,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { // No recycler found. Emit an event and mark the volume Failed. 
- if volume.Status.Phase != api.VolumeFailed { - // Log the error only once, when we enter 'Failed' phase - glog.V(2).Infof("failed to find recycle plugin for volume %q: %v", volume.Name, err) - ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!") - } - - if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!"); err != nil { glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return @@ -1054,14 +1076,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) recycler, err := plugin.NewRecycler(spec) if err != nil { // Cannot create recycler - if volume.Status.Phase != api.VolumeFailed { - // Log the error only once, when we enter 'Failed' phase - glog.V(2).Infof("failed to create recycler for volume %q: %v", volume.Name, err) - strerr := fmt.Sprintf("Failed to create recycler: %v", err) - ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", strerr) - } - - if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + strerr := fmt.Sprintf("Failed to create recycler: %v", err) + if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil { glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return @@ -1073,14 +1089,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) if err = recycler.Recycle(); err != nil { // Recycler failed - if volume.Status.Phase != api.VolumeFailed { - // Log the error only once, when we enter 'Failed' phase - glog.V(2).Infof("recycler for volume %q failed: %v", volume.Name, err) - strerr := fmt.Sprintf("Recycler failed: %s", err) - ctrl.eventRecorder.Event(volume, api.EventTypeWarning, "VolumeFailedRecycle", strerr) - } - - if _, err = ctrl.updateVolumePhase(volume, api.VolumeFailed); err != nil { + strerr := fmt.Sprintf("Recycler failed: %s", err) + if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil { glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return From 22e68d4622fe23ac494622da092aede4ccc5312a Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:22 +0200 Subject: [PATCH 21/34] recycler: unit tests - Add reclaim policy to newVolume() call. - Implement reactor Volumes().Get(). - Implement mock volume plugin. - Add recycler tests. - Add a synchronization condition to controller.scheduleOperation - we need to pause the controller here, let the test to do some bad things to the controller and test error cases in recycleVolumeOperation. Test framework gets more and more complicated... But this is the last piece, I promise. 
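The "synchronization condition" mentioned above is the preOperationHook field added in the diff below: scheduleOperation invokes it just before deciding whether to start the goroutine, so a test can stall the controller and mutate the fake API server to drive recycleVolumeOperation down its error paths. A hypothetical test usage, assuming the hook signature from the diff:

    // Illustrative only: sabotage the reactor just before a recycle starts so
    // that the PersistentVolumes().Get() inside recycleVolumeOperation fails.
    ctrl.preOperationHook = func(operationName string, operationArgument interface{}) {
        volume := operationArgument.(*api.PersistentVolume)
        // Remove the volume from the fake API server's state.
        delete(reactor.volumes, volume.Name)
    }
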
--- .../persistentvolume_controller.go | 9 + .../persistentvolume_controller_test.go | 14 +- .../persistentvolume_framework_test.go | 230 +++++++++++++++++- .../persistentvolume_recycle_test.go | 196 +++++++++++++++ .../persistentvolume_sync_test.go | 136 +++++------ 5 files changed, 502 insertions(+), 83 deletions(-) create mode 100644 pkg/controller/persistentvolume/persistentvolume_recycle_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index dcf2ac6a42c..7d47f149a05 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -108,6 +108,10 @@ type PersistentVolumeController struct { // runningOperations is map of running operations. The value does not // matter, presence of a key is enough to consider an operation running. runningOperations map[string]bool + + // For testing only: hook to call before an asynchronous operation starts. + // Not used when set to nil. + preOperationHook func(operationName string, operationArgument interface{}) } // NewPersistentVolumeController creates a new PersistentVolumeController @@ -1184,6 +1188,11 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) { glog.V(4).Infof("scheduleOperation[%s]", operationName) + // Poke test code that an operation is just about to get started. + if ctrl.preOperationHook != nil { + ctrl.preOperationHook(operationName, arg) + } + isRunning := func() bool { // In anonymous func() to get the locking right. ctrl.runningOperationsMapLock.Lock() diff --git a/pkg/controller/persistentvolume/persistentvolume_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_controller_test.go index 8deeaeba4a3..67a103d06ac 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller_test.go @@ -45,13 +45,13 @@ func TestControllerSync(t *testing.T) { // syncClaim, not on addVolume. "5-1 - addVolume", novolumes, /* added in testCall below */ - newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable), + newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), noevents, // Custom test function that generates an add event func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { - volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending) + volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain) reactor.volumes[volume.Name] = volume reactor.volumeSource.Add(volume) return nil @@ -60,8 +60,8 @@ func TestControllerSync(t *testing.T) { { // addClaim gets a new claim. Check it's bound to a volume. 
"5-2 - complete bind", - newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable), - newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, annBoundByController), + newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), noclaims, /* added in testAddClaim5_2 */ newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted), noevents, @@ -76,8 +76,8 @@ func TestControllerSync(t *testing.T) { { // deleteClaim with a bound claim makes bound volume released. "5-3 - delete claim", - newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, annBoundByController), - newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, annBoundByController), + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted), noclaims, noevents, @@ -99,7 +99,7 @@ func TestControllerSync(t *testing.T) { { // deleteVolume with a bound volume. Check the claim is Lost. "5-4 - delete volume", - newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound), + newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), novolumes, newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted), diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index 2f2c6a0d09a..c43dac4eab3 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -23,6 +23,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -41,6 +42,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/diff" + vol "k8s.io/kubernetes/pkg/volume" ) // This is a unit test framework for persistent volume controller. 
@@ -185,6 +187,17 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj r.changedSinceLastSync++ glog.V(4).Infof("saved updated claim %s", claim.Name) return true, claim, nil + + case action.Matches("get", "persistentvolumes"): + name := action.(core.GetAction).GetName() + volume, found := r.volumes[name] + if found { + glog.V(4).Infof("GetVolume: found %s", volume.Name) + return true, volume, nil + } else { + glog.V(4).Infof("GetVolume: volume %s not found", name) + return true, nil, fmt.Errorf("Cannot find volume %s", name) + } } return false, nil, nil } @@ -382,16 +395,31 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVolumeController { ctrl := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(cache.MetaNamespaceKeyFunc), - kubeClient: kubeClient, - eventRecorder: record.NewFakeRecorder(1000), + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(cache.MetaNamespaceKeyFunc), + kubeClient: kubeClient, + eventRecorder: record.NewFakeRecorder(1000), + runningOperations: make(map[string]bool), } return ctrl } +func addRecyclePlugin(ctrl *PersistentVolumeController, expectedRecycleCalls []error) { + plugin := &mockVolumePlugin{ + recycleCalls: expectedRecycleCalls, + } + ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) +} + +func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { + plugin := &mockVolumePlugin{ + deleteCalls: expectedDeleteCalls, + } + ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) +} + // newVolume returns a new volume with given attributes -func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, annotations ...string) *api.PersistentVolume { +func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume { volume := api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: name, @@ -404,7 +432,8 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, }, - AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + PersistentVolumeReclaimPolicy: reclaimPolicy, }, Status: api.PersistentVolumeStatus{ Phase: phase, @@ -433,9 +462,9 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a // newVolumeArray returns array with a single volume that would be returned by // newVolume() with the same parameters. 
-func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, annotations ...string) []*api.PersistentVolume { +func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) []*api.PersistentVolume { return []*api.PersistentVolume{ - newVolume(name, capacity, boundToClaimUID, boundToClaimName, phase, annotations...), + newVolume(name, capacity, boundToClaimUID, boundToClaimName, phase, reclaimPolicy, annotations...), } } @@ -498,6 +527,66 @@ func testSyncVolume(ctrl *PersistentVolumeController, reactor *volumeReactor, te return ctrl.syncVolume(test.initialVolumes[0]) } +type operationType string + +const operationDelete = "Delete" +const operationRecycle = "Recycle" + +// wrapTestWithControllerConfig returns a testCall that: +// - configures controller with recycler or deleter which will return provided +// errors when a volume is deleted or recycled. +// - calls given testCall +func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { + expected := expectedOperationCalls + + return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + switch operation { + case operationDelete: + addDeletePlugin(ctrl, expected) + case operationRecycle: + addRecyclePlugin(ctrl, expected) + } + + return toWrap(ctrl, reactor, test) + } +} + +// wrapTestWithInjectedOperation returns a testCall that: +// - starts the controller and lets it run original testCall until +// scheduleOperation() call. It blocks the controller there and calls the +// injected function to simulate that something is happening when the +// controller waits for the operation lock. Controller is then resumed and we +// check how it behaves. +func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *volumeReactor)) testCall { + + return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + // Inject a hook before async operation starts + ctrl.preOperationHook = func(operationName string, arg interface{}) { + // Inside the hook, run the function to inject + glog.V(4).Infof("reactor: scheduleOperation reached, injecting call") + injectBeforeOperation(ctrl, reactor) + } + + // Run the tested function (typically syncClaim/syncVolume) in a + // separate goroutine. + var testError error + var testFinished int32 + + go func() { + testError = toWrap(ctrl, reactor, test) + // Let the "main" test function know that syncVolume has finished. + atomic.StoreInt32(&testFinished, 1) + }() + + // Wait for the controller to finish the test function.
+ for atomic.LoadInt32(&testFinished) == 0 { + time.Sleep(time.Millisecond * 10) + } + + return testError + } +} + func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest, t *testing.T) { // Evaluate results if err := reactor.checkClaims(t, test.expectedClaims); err != nil { @@ -542,6 +631,9 @@ func runSyncTests(t *testing.T, tests []controllerTest) { t.Errorf("Test %q failed: %v", test.name, err) } + // Wait for all goroutines to finish + reactor.waitTest() + evaluateTestResults(ctrl, reactor, test, t) } } @@ -596,6 +688,9 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { break } + // Wait for all goroutines to finish + reactor.waitTest() + obj := reactor.popChange() if obj == nil { // Nothing was changed, should we exit? @@ -655,3 +750,122 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { glog.V(4).Infof("test %q finished after %d iterations", test.name, counter) } } + +// Dummy volume plugin for provisioning, deletion and recycling. It contains +// lists of expected return values to simulate errors. +type mockVolumePlugin struct { + provisionCalls []error + provisionCallCounter int + deleteCalls []error + deleteCallCounter int + recycleCalls []error + recycleCallCounter int +} + +var _ vol.VolumePlugin = &mockVolumePlugin{} + +func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error { + return nil +} + +func (plugin *mockVolumePlugin) Name() string { + return "mockVolumePlugin" +} + +func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool { + return true +} + +func (plugin *mockVolumePlugin) NewMounter(spec *vol.Spec, podRef *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) { + return nil, fmt.Errorf("Mounter is not supported by this plugin") +} + +func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol.Unmounter, error) { + return nil, fmt.Errorf("Unmounter is not supported by this plugin") +} + +// Provisioner interfaces + +func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) { + if len(plugin.provisionCalls) > 0 { + // mockVolumePlugin directly implements Provisioner interface + glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner") + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no provisionCalls configured") + } +} + +func (plugin *mockVolumePlugin) Provision(*api.PersistentVolume) error { + if len(plugin.provisionCalls) <= plugin.provisionCallCounter { + return fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) + } + ret := plugin.provisionCalls[plugin.provisionCallCounter] + plugin.provisionCallCounter++ + glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v", plugin.provisionCallCounter, ret) + return ret +} + +func (plugin *mockVolumePlugin) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { + if len(plugin.provisionCalls) <= plugin.provisionCallCounter { + return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) + } + ret := plugin.provisionCalls[plugin.provisionCallCounter] + plugin.provisionCallCounter++ + glog.V(4).Infof("mock plugin NewPersistentVolumeTemplate call nr. 
%d, returning %v", plugin.provisionCallCounter, ret) + return nil, ret +} + +// Deleter interfaces + +func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) { + if len(plugin.deleteCalls) > 0 { + // mockVolumePlugin directly implements Deleter interface + glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter") + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured") + } +} + +func (plugin *mockVolumePlugin) Delete() error { + if len(plugin.deleteCalls) <= plugin.deleteCallCounter { + return fmt.Errorf("Mock plugin error: unexpected deleter call %d", plugin.deleteCallCounter) + } + ret := plugin.deleteCalls[plugin.deleteCallCounter] + plugin.deleteCallCounter++ + glog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret) + return ret +} + +// Volume interfaces + +func (plugin *mockVolumePlugin) GetPath() string { + return "" +} + +func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) { + return nil, nil +} + +// Recycler interfaces + +func (plugin *mockVolumePlugin) NewRecycler(spec *vol.Spec) (vol.Recycler, error) { + if len(plugin.recycleCalls) > 0 { + // mockVolumePlugin directly implements Recycler interface + glog.V(4).Infof("mock plugin NewRecycler called, returning mock recycler") + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no recycleCalls configured") + } +} + +func (plugin *mockVolumePlugin) Recycle() error { + if len(plugin.recycleCalls) <= plugin.recycleCallCounter { + return fmt.Errorf("Mock plugin error: unexpected recycle call %d", plugin.recycleCallCounter) + } + ret := plugin.recycleCalls[plugin.recycleCallCounter] + plugin.recycleCallCounter++ + glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret) + return ret +} diff --git a/pkg/controller/persistentvolume/persistentvolume_recycle_test.go b/pkg/controller/persistentvolume/persistentvolume_recycle_test.go new file mode 100644 index 00000000000..acac9436a23 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_recycle_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncVolume, expecting recycling to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncVolume *once*. +// 3. Compare resulting volumes with expected volumes. 
+func TestRecycleSync(t *testing.T) { + tests := []controllerTest{ + { + // recycle volume bound by controller + "6-1 - successful recycle", + newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController), + newVolumeArray("volume6-1", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // recycle volume bound by user + "6-2 - successful recycle with prebound volume", + newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-2", "1Gi", "", "claim6-2", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // recycle failure - plugin not found + "6-3 - plugin not found", + newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, testSyncVolume, + }, + { + // recycle failure - newRecycler returns error + "6-4 - newRecycler returns error", + newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, + wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), + }, + { + // recycle failure - recycle returns error + "6-5 - recycle returns error", + newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, + wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), + }, + { + // recycle success(?) - volume is deleted before doRecycle() starts + "6-6 - volume is deleted before recycling", + newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + novolumes, + noclaims, + noclaims, + noevents, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Delete the volume before recycle operation starts + reactor.lock.Lock() + delete(reactor.volumes, "volume6-6") + reactor.lock.Unlock() + }), + }, + { + // recycle success(?) - volume is recycled by previous recycler just + // at the time new doRecycle() starts. This simulates "volume no + // longer needs recycling, skipping". 
+ "6-7 - volume is deleted before recycling", + newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController), + newVolumeArray("volume6-7", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Mark the volume as Available before the recycler starts + reactor.lock.Lock() + volume := reactor.volumes["volume6-7"] + volume.Spec.ClaimRef = nil + volume.Status.Phase = api.VolumeAvailable + volume.Annotations = nil + reactor.lock.Unlock() + }), + }, + { + // recycle success(?) - volume bound by user is recycled by previous + // recycler just at the time new doRecycle() starts. This simulates + // "volume no longer needs recycling, skipping" with volume bound by + // user. + "6-8 - prebound volume is deleted before recycling", + newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-8", "1Gi", "", "claim6-8", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Mark the volume as Available before the recycler starts + reactor.lock.Lock() + volume := reactor.volumes["volume6-8"] + volume.Spec.ClaimRef.UID = "" + volume.Status.Phase = api.VolumeAvailable + reactor.lock.Unlock() + }), + }, + { + // recycle success - volume bound by user is recycled, while a new + // claim is created with another UID. + "6-9 - prebound volume is recycled while the claim exists", + newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-9", "1Gi", "", "claim6-9", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), + newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), + noevents, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // volume has unknown reclaim policy - failure expected + "6-10 - unknown reclaim policy", + newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeBound, "Unknown"), + newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeFailed, "Unknown"), + noclaims, + noclaims, + []string{"Warning VolumeUnknownReclaimPolicy"}, testSyncVolume, + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. 
(simulation of + "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set + of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls is enforced to prevent endless loops. +func TestRecycleMultiSync(t *testing.T) { + tests := []controllerTest{ + { + // recycle failure - recycle returns error. The controller should + // try again. + "7-1 - recycle returns error", + newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume7-1", "1Gi", "", "claim7-1", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, + wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), + }, + } + + runMultisyncTests(t, tests) +} diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index 91529c02fa3..8cdb5442b8e 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -35,8 +35,8 @@ func TestSync(t *testing.T) { { // syncClaim binds to a matching unbound volume. "1-1 - successful bind", - newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending), - newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -44,8 +44,8 @@ func TestSync(t *testing.T) { { // syncClaim does not do anything when there is no matching volume. "1-2 - noop", - newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending), - newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), noevents, testSyncClaim, @@ -54,8 +54,8 @@ func TestSync(t *testing.T) { // syncClaim resets claim.Status to Pending when there is no // matching volume.
"1-3 - reset to Pending", - newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending), - newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending), + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), noevents, testSyncClaim, @@ -64,12 +64,12 @@ func TestSync(t *testing.T) { // syncClaim binds claims to the smallest matching volume "1-4 - smallest volume", []*api.PersistentVolume{ - newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending), - newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending), + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), }, []*api.PersistentVolume{ - newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending), - newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, annBoundByController), + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), }, newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), @@ -80,12 +80,12 @@ func TestSync(t *testing.T) { // name), even though a smaller one is available. "1-5 - prebound volume by name - success", []*api.PersistentVolume{ - newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending), - newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending), + newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), }, []*api.PersistentVolume{ - newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound), - newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending), + newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), }, newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted), @@ -96,12 +96,12 @@ func TestSync(t *testing.T) { // UID), even though a smaller one is available. 
"1-6 - prebound volume by UID - success", []*api.PersistentVolume{ - newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending), - newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending), + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), }, []*api.PersistentVolume{ - newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound), - newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending), + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), }, newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted), @@ -111,8 +111,8 @@ func TestSync(t *testing.T) { // syncClaim does not bind claim to a volume prebound to a claim with // same name and different UID "1-7 - prebound volume to different claim", - newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending), - newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), noevents, testSyncClaim, @@ -121,8 +121,8 @@ func TestSync(t *testing.T) { // syncClaim completes binding - simulates controller crash after // PV.ClaimRef is saved "1-8 - complete bind after crash - PV bound", - newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, annBoundByController), - newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -131,8 +131,8 @@ func TestSync(t *testing.T) { // syncClaim completes binding - simulates controller crash after // PV.Status is saved "1-9 - complete bind after crash - PV status saved", - newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, annBoundByController), - newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -141,8 +141,8 @@ func TestSync(t *testing.T) { // syncClaim completes binding - simulates controller crash 
after // PVC.VolumeName is saved "10 - complete bind after crash - PVC bound", - newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), - newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, annBoundByController), + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -173,8 +173,8 @@ func TestSync(t *testing.T) { // syncClaim with claim pre-bound to a PV that exists and is // unbound. Check it gets bound and no annBoundByController is set. "2-3 - claim prebound to unbound volume", - newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending), - newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, annBoundByController), + newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted), noevents, testSyncClaim, @@ -183,8 +183,8 @@ func TestSync(t *testing.T) { // claim with claim pre-bound to a PV that is pre-bound to the claim // by name. Check it gets bound and no annBoundByController is set. "2-4 - claim prebound to prebound volume by name", - newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending), - newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound), + newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted), noevents, testSyncClaim, @@ -194,8 +194,8 @@ func TestSync(t *testing.T) { // claim by UID. Check it gets bound and no annBoundByController is // set. "2-5 - claim prebound to prebound volume by UID", - newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending), - newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound), + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted), noevents, testSyncClaim, @@ -204,8 +204,8 @@ func TestSync(t *testing.T) { // syncClaim with claim pre-bound to a PV that is bound to different // claim. Check it's reset to Pending. 
"2-6 - claim prebound to already bound volume", - newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound), - newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound), + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending), noevents, testSyncClaim, @@ -214,8 +214,8 @@ func TestSync(t *testing.T) { // syncClaim with claim bound by controller to a PV that is bound to // different claim. Check it throws an error. "2-7 - claim bound by controller to already bound volume", - newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound), - newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound), + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), noevents, testSyncClaimError, @@ -245,8 +245,8 @@ func TestSync(t *testing.T) { // syncClaim with claim bound to unbound volume. Check it's bound. // Also check that Pending phase is set to Bound "3-3 - bound claim with unbound volume", - newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending), - newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, annBoundByController), + newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -255,8 +255,8 @@ func TestSync(t *testing.T) { // syncClaim with claim bound to volume with missing (or different) // volume.Spec.ClaimRef.UID. Check that the claim is marked as lost. "3-4 - bound claim with prebound volume", - newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending), - newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), []string{"Warning ClaimMisbound"}, testSyncClaim, @@ -266,8 +266,8 @@ func TestSync(t *testing.T) { // controller does not do anything. 
Also check that Pending phase is // set to Bound "3-5 - bound claim with bound volume", - newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending), - newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound), + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), noevents, testSyncClaim, @@ -277,8 +277,8 @@ func TestSync(t *testing.T) { // claim. Check that the claim is marked as lost. // TODO: test that an event is emitted "3-6 - bound claim with bound volume", - newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending), - newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), []string{"Warning ClaimMisbound"}, testSyncClaim, @@ -287,8 +287,8 @@ func TestSync(t *testing.T) { { // syncVolume with pending volume. Check it's marked as Available. "4-1 - pending volume", - newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending), - newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable), + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), noclaims, noclaims, noevents, testSyncVolume, @@ -297,8 +297,8 @@ func TestSync(t *testing.T) { // syncVolume with prebound pending volume. Check it's marked as // Available. "4-2 - pending prebound volume", - newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending), - newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable), + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), noclaims, noclaims, noevents, testSyncVolume, @@ -307,8 +307,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound to missing claim. // Check the volume gets Released "4-3 - bound volume with missing claim", - newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound), - newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased), + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain), noclaims, noclaims, noevents, testSyncVolume, @@ -317,8 +317,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound to claim with different UID. // Check the volume gets Released. 
"4-4 - volume bound to claim with different UID", - newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound), - newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased), + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), noevents, testSyncVolume, @@ -327,8 +327,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound by controller to unbound claim. // Check syncVolume does not do anything. "4-5 - volume bound by controller to unbound claim", - newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, annBoundByController), - newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, annBoundByController), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), noevents, testSyncVolume, @@ -337,8 +337,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound by user to unbound claim. // Check syncVolume does not do anything. "4-5 - volume bound by user to bound claim", - newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound), - newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), noevents, testSyncVolume, @@ -347,8 +347,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound to bound claim. // Check that the volume is marked as Bound. "4-6 - volume bound by to bound claim", - newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable), - newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound), + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), noevents, testSyncVolume, @@ -357,8 +357,8 @@ func TestSync(t *testing.T) { // syncVolume with volume bound by controller to claim bound to // another volume. Check that the volume is rolled back. 
"4-7 - volume bound by controller to claim bound somewhere else", - newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, annBoundByController), - newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable), + newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), noevents, testSyncVolume, @@ -368,8 +368,8 @@ func TestSync(t *testing.T) { // another volume. Check that the volume is marked as Available // and its UID is reset. "4-8 - volume bound by user to claim bound somewhere else", - newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound), - newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable), + newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), noevents, testSyncVolume, @@ -398,8 +398,8 @@ func TestMultiSync(t *testing.T) { { // syncClaim binds to a matching unbound volume. "10-1 - successful bind", - newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending), - newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, annBoundByController), + newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), noevents, testSyncClaim, @@ -409,12 +409,12 @@ func TestMultiSync(t *testing.T) { // wins and the second rolls back. 
"10-2 - bind PV race", []*api.PersistentVolume{ - newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), - newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), }, []*api.PersistentVolume{ - newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, annBoundByController), - newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable), + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), }, newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), From dd7890c362d966ac465a19a4d1cfe2eb1ec06132 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:23 +0200 Subject: [PATCH 22/34] delete: Implement Deleter --- .../persistentvolume_controller.go | 94 ++++++++-- .../persistentvolume_delete_test.go | 169 ++++++++++++++++++ .../persistentvolume_framework_test.go | 23 +++ 3 files changed, 268 insertions(+), 18 deletions(-) create mode 100644 pkg/controller/persistentvolume/persistentvolume_delete_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 7d47f149a05..9e3f15ddcff 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -1120,25 +1120,55 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) // deleteVolumeOperation deletes a volume. This method is running in standalone // goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) { - volume := arg.(*api.PersistentVolume) - glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) - /*else if pv.Spec.ReclaimPolicy == "Delete" { - plugin := findDeleterPluginForPV(pv) - if plugin != nil { - // maintain a map with the current deleter goroutines that are running - // if the key is already present in the map, return - // - // launch the goroutine that: - // 1. deletes the storage asset - // 2. deletes the PV API object - // 3. deletes itself from the map when it's done - } else { - // make an event calling out that no deleter was configured - // mark the PV as failed - // NB: external provisioners/deleters are currently not - // considered. + volume, ok := arg.(*api.PersistentVolume) + if !ok { + glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %+v", arg) + return } - */ + glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) + + // This method may have been waiting for a volume lock for some time. + // Previous deleteVolumeOperation might just have saved an updated version, so + // read current volume state now. 
+ newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name) + if err != nil { + glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + return + } + needsReclaim, err := ctrl.isVolumeReleased(newVolume) + if err != nil { + glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + return + } + if !needsReclaim { + glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) + return + } + + if err = ctrl.doDeleteVolume(volume); err != nil { + // Delete failed, update the volume and emit an event. + glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) + if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedDelete", err.Error()); err != nil { + glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + // Save failed, retry on the next deletion attempt + return + } + // Despite the volume being Failed, the controller will retry deleting + // the volume in every syncVolume() call. + return + } + + glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) + // Delete the volume + if err = ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil); err != nil { + // Oops, could not delete the volume and therefore the controller will + // try to delete the volume again on next update. We _could_ maintain a + // cache of "recently deleted volumes" and avoid unnecessary deletion, + // this is left out as future optimization. + glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) + return + } + return } // isVolumeReleased returns true if given volume is released and can be recycled @@ -1183,6 +1213,34 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV return true, nil } +// doDeleteVolume finds appropriate delete plugin and deletes given volume +// (it will be re-used in future provisioner error cases). +func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVolume) error { + glog.V(4).Infof("doDeleteVolume [%s]", volume.Name) + // Find a plugin. + spec := vol.NewSpecFromPersistentVolume(volume, false) + plugin, err := ctrl.recyclePluginMgr.FindDeletablePluginBySpec(spec) + if err != nil { + // No deleter found. Emit an event and mark the volume Failed. + return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err) + } + + // Plugin found + deleter, err := plugin.NewDeleter(spec) + if err != nil { + // Cannot create deleter + return fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err) + } + + if err = deleter.Delete(); err != nil { + // Deleter failed + return fmt.Errorf("Delete of volume %q failed: %v", volume.Name, err) + } + + glog.V(2).Infof("volume %q deleted", volume.Name) + return nil +} + // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) { diff --git a/pkg/controller/persistentvolume/persistentvolume_delete_test.go b/pkg/controller/persistentvolume/persistentvolume_delete_test.go new file mode 100644 index 00000000000..c1a3dee9558 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_delete_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncVolume, expecting deletion to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncVolume *once*. +// 3. Compare resulting volumes with expected volumes. +func TestDeleteSync(t *testing.T) { + tests := []controllerTest{ + { + // delete volume bound by controller + "8-1 - successful delete", + newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + novolumes, + noclaims, + noclaims, + noevents, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + { + // delete volume bound by user + "8-2 - successful delete with prebound volume", + newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + noclaims, + noclaims, + noevents, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + { + // delete failure - plugin not found + "8-3 - plugin not found", + newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, testSyncVolume, + }, + { + // delete failure - newDeleter returns error + "8-4 - newDeleter returns error", + newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, + wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), + }, + { + // delete failure - delete() returns error + "8-5 - delete returns error", + newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, + wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), + }, + { + // delete success(?)
- volume is deleted before doDelete() starts + "8-6 - volume is deleted before deleting", + newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + noclaims, + noclaims, + noevents, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Delete the volume before delete operation starts + reactor.lock.Lock() + delete(reactor.volumes, "volume8-6") + reactor.lock.Unlock() + }), + }, + { + // delete success(?) - volume is bound just at the time doDelete() + // starts. This simulates "volume no longer needs recycling, + // skipping". + "8-7 - volume is bound before deleting", + newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + noclaims, + newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), + noevents, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + reactor.lock.Lock() + defer reactor.lock.Unlock() + // Bind the volume to resurrected claim (this should never + // happen) + claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound) + reactor.claims[claim.Name] = claim + ctrl.claims.Add(claim) + volume := reactor.volumes["volume8-7"] + volume.Status.Phase = api.VolumeBound + }), + }, + { + // delete success - volume bound by user is deleted, while a new + // claim is created with another UID. + "8-9 - prebound volume is deleted while the claim exists", + newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), + newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), + noevents, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, + call appropriate syncVolume/syncClaim (simulating "volume/claim changed" + events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call + syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of + "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set + of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls is enforced to prevent endless loops. +func TestDeleteMultiSync(t *testing.T) { + tests := []controllerTest{ + { + // delete failure - delete returns error. The controller should + // try again.
+ "9-1 - delete returns error",
+ newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete),
+ novolumes,
+ noclaims,
+ noclaims,
+ []string{"Warning VolumeFailedDelete"},
+ wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
+ },
+ }
+
+ runMultisyncTests(t, tests)
+}
diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go
index c43dac4eab3..a0115ae5750 100644
--- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go
+++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go
@@ -198,7 +198,30 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 glog.V(4).Infof("GetVolume: volume %s not found", name)
 return true, nil, fmt.Errorf("Cannot find volume %s", name)
 }
+
+ case action.Matches("delete", "persistentvolumes"):
+ name := action.(core.DeleteAction).GetName()
+ glog.V(4).Infof("deleted volume %s", name)
+ _, found := r.volumes[name]
+ if found {
+ delete(r.volumes, name)
+ return true, nil, nil
+ } else {
+ return true, nil, fmt.Errorf("Cannot delete volume %s: not found", name)
+ }
+
+ case action.Matches("delete", "persistentvolumeclaims"):
+ name := action.(core.DeleteAction).GetName()
+ glog.V(4).Infof("deleted claim %s", name)
+ _, found := r.claims[name]
+ if found {
+ delete(r.claims, name)
+ return true, nil, nil
+ } else {
+ return true, nil, fmt.Errorf("Cannot delete claim %s: not found", name)
+ }
 }
+
 return false, nil, nil
}

From 75b0e2ad6320915c7ad3e487d1eb19c909e42425 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Tue, 17 May 2016 14:55:24 +0200
Subject: [PATCH 23/34] provisioning: Refactor volume plugins.

NewPersistentVolumeTemplate() and Provision() are merged into one call.
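For illustration, the caller's side of the merged call can be sketched as
follows. This is only a rough sketch: claimRef and kubeClient stand in for
whatever the caller has at hand, and the real controller-side wiring lands in
the next patch.

    // Provision() now both allocates the storage asset and returns a fully
    // populated PersistentVolume object; nothing is persisted yet.
    pv, err := provisioner.Provision()
    if err != nil {
        // The storage asset was not created; the caller may retry later.
        return err
    }
    // The caller decides how to bind and save the returned object.
    pv.Spec.ClaimRef = claimRef
    _, err = kubeClient.Core().PersistentVolumes().Create(pv)
    return err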
--- .../persistentvolume_framework_test.go | 49 ++++++++++----- pkg/volume/aws_ebs/aws_ebs.go | 62 ++++++++----------- pkg/volume/aws_ebs/aws_ebs_test.go | 9 +-- pkg/volume/cinder/cinder.go | 27 +++----- pkg/volume/cinder/cinder_test.go | 9 +-- pkg/volume/gce_pd/gce_pd.go | 62 ++++++++----------- pkg/volume/gce_pd/gce_pd_test.go | 9 +-- pkg/volume/host_path/host_path.go | 18 +++--- pkg/volume/host_path/host_path_test.go | 2 +- pkg/volume/testing/testing.go | 13 ++-- pkg/volume/volume.go | 11 ++-- 11 files changed, 119 insertions(+), 152 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index a0115ae5750..fda289b6b33 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/diff" vol "k8s.io/kubernetes/pkg/volume" ) @@ -783,6 +784,7 @@ type mockVolumePlugin struct { deleteCallCounter int recycleCalls []error recycleCallCounter int + provisionOptions vol.VolumeOptions } var _ vol.VolumePlugin = &mockVolumePlugin{} @@ -813,30 +815,49 @@ func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.P if len(plugin.provisionCalls) > 0 { // mockVolumePlugin directly implements Provisioner interface glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner") + plugin.provisionOptions = options return plugin, nil } else { return nil, fmt.Errorf("Mock plugin error: no provisionCalls configured") } } -func (plugin *mockVolumePlugin) Provision(*api.PersistentVolume) error { - if len(plugin.provisionCalls) <= plugin.provisionCallCounter { - return fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) - } - ret := plugin.provisionCalls[plugin.provisionCallCounter] - plugin.provisionCallCounter++ - glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v", plugin.provisionCallCounter, ret) - return ret -} - -func (plugin *mockVolumePlugin) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { +func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { if len(plugin.provisionCalls) <= plugin.provisionCallCounter { return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) } - ret := plugin.provisionCalls[plugin.provisionCallCounter] + + var pv *api.PersistentVolume + err := plugin.provisionCalls[plugin.provisionCallCounter] + if err == nil { + // Create a fake PV + fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID()) + + pv = &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: plugin.provisionOptions.PVName, + Annotations: map[string]string{ + "kubernetes.io/createdby": "hostpath-dynamic-provisioner", + }, + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy, + AccessModes: plugin.provisionOptions.AccessModes, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity, + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{ + Path: fullpath, + }, + }, + }, + } + } + plugin.provisionCallCounter++ - glog.V(4).Infof("mock plugin NewPersistentVolumeTemplate call nr. 
%d, returning %v", plugin.provisionCallCounter, ret) - return nil, ret + glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err) + return pv, err } // Deleter interfaces diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go index dfbdae80533..d21bf0cac12 100644 --- a/pkg/volume/aws_ebs/aws_ebs.go +++ b/pkg/volume/aws_ebs/aws_ebs.go @@ -408,14 +408,35 @@ type awsElasticBlockStoreProvisioner struct { var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{} -func (c *awsElasticBlockStoreProvisioner) Provision(pv *api.PersistentVolume) error { +func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) { volumeID, sizeGB, labels, err := c.manager.CreateVolume(c) if err != nil { - return err + return nil, err } - pv.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID = volumeID - pv.Spec.Capacity = api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: c.options.PVName, + Labels: map[string]string{}, + Annotations: map[string]string{ + "kubernetes.io/createdby": "aws-ebs-dynamic-provisioner", + }, + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, + AccessModes: c.options.AccessModes, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ + VolumeID: volumeID, + FSType: "ext4", + Partition: 0, + ReadOnly: false, + }, + }, + }, } if len(labels) != 0 { @@ -427,34 +448,5 @@ func (c *awsElasticBlockStoreProvisioner) Provision(pv *api.PersistentVolume) er } } - return nil -} - -func (c *awsElasticBlockStoreProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { - // Provide dummy api.PersistentVolume.Spec, it will be filled in - // awsElasticBlockStoreProvisioner.Provision() - return &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - GenerateName: "pv-aws-", - Labels: map[string]string{}, - Annotations: map[string]string{ - "kubernetes.io/createdby": "aws-ebs-dynamic-provisioner", - }, - }, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, - AccessModes: c.options.AccessModes, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): c.options.Capacity, - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ - VolumeID: volume.ProvisionedVolumeName, - FSType: "ext4", - Partition: 0, - ReadOnly: false, - }, - }, - }, - }, nil + return pv, nil } diff --git a/pkg/volume/aws_ebs/aws_ebs_test.go b/pkg/volume/aws_ebs/aws_ebs_test.go index c0193021f00..02e2d098e0a 100644 --- a/pkg/volume/aws_ebs/aws_ebs_test.go +++ b/pkg/volume/aws_ebs/aws_ebs_test.go @@ -220,14 +220,7 @@ func TestPlugin(t *testing.T) { PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{}) - persistentSpec, err := provisioner.NewPersistentVolumeTemplate() - if err != nil { - t.Errorf("NewPersistentVolumeTemplate() failed: %v", err) - } - - // get 2nd Provisioner - persistent volume controller will do the same - provisioner, err = 
plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{}) - err = provisioner.Provision(persistentSpec) + persistentSpec, err := provisioner.Provision() if err != nil { t.Errorf("Provision() failed: %v", err) } diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index b4dcaedb8a5..e680528c217 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -427,25 +427,16 @@ type cinderVolumeProvisioner struct { var _ volume.Provisioner = &cinderVolumeProvisioner{} -func (c *cinderVolumeProvisioner) Provision(pv *api.PersistentVolume) error { +func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) { volumeID, sizeGB, err := c.manager.CreateVolume(c) if err != nil { - return err + return nil, err } - pv.Spec.PersistentVolumeSource.Cinder.VolumeID = volumeID - pv.Spec.Capacity = api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), - } - return nil -} -func (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { - // Provide dummy api.PersistentVolume.Spec, it will be filled in - // cinderVolumeProvisioner.Provision() - return &api.PersistentVolume{ + pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ - GenerateName: "pv-cinder-", - Labels: map[string]string{}, + Name: c.options.PVName, + Labels: map[string]string{}, Annotations: map[string]string{ "kubernetes.io/createdby": "cinder-dynamic-provisioner", }, @@ -454,16 +445,16 @@ func (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.Persistent PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, AccessModes: c.options.AccessModes, Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): c.options.Capacity, + api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), }, PersistentVolumeSource: api.PersistentVolumeSource{ Cinder: &api.CinderVolumeSource{ - VolumeID: volume.ProvisionedVolumeName, + VolumeID: volumeID, FSType: "ext4", ReadOnly: false, }, }, }, - }, nil - + } + return pv, nil } diff --git a/pkg/volume/cinder/cinder_test.go b/pkg/volume/cinder/cinder_test.go index 6f1a53eceba..facbbc8f47c 100644 --- a/pkg/volume/cinder/cinder_test.go +++ b/pkg/volume/cinder/cinder_test.go @@ -212,14 +212,7 @@ func TestPlugin(t *testing.T) { PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0}) - persistentSpec, err := provisioner.NewPersistentVolumeTemplate() - if err != nil { - t.Errorf("NewPersistentVolumeTemplate() failed: %v", err) - } - - // get 2nd Provisioner - persistent volume controller will do the same - provisioner, err = plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0}) - err = provisioner.Provision(persistentSpec) + persistentSpec, err := provisioner.Provision() if err != nil { t.Errorf("Provision() failed: %v", err) } diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 006ed573496..32ea1043420 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -370,14 +370,35 @@ type gcePersistentDiskProvisioner struct { var _ volume.Provisioner = &gcePersistentDiskProvisioner{} -func (c *gcePersistentDiskProvisioner) Provision(pv *api.PersistentVolume) error { +func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) { volumeID, sizeGB, labels, err := c.manager.CreateVolume(c) if err != nil { - 
return err + return nil, err } - pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName = volumeID - pv.Spec.Capacity = api.ResourceList{ - api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: c.options.PVName, + Labels: map[string]string{}, + Annotations: map[string]string{ + "kubernetes.io/createdby": "gce-pd-dynamic-provisioner", + }, + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, + AccessModes: c.options.AccessModes, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: volumeID, + FSType: "ext4", + Partition: 0, + ReadOnly: false, + }, + }, + }, } if len(labels) != 0 { @@ -389,34 +410,5 @@ func (c *gcePersistentDiskProvisioner) Provision(pv *api.PersistentVolume) error } } - return nil -} - -func (c *gcePersistentDiskProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { - // Provide dummy api.PersistentVolume.Spec, it will be filled in - // gcePersistentDiskProvisioner.Provision() - return &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ - GenerateName: "pv-gce-", - Labels: map[string]string{}, - Annotations: map[string]string{ - "kubernetes.io/createdby": "gce-pd-dynamic-provisioner", - }, - }, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, - AccessModes: c.options.AccessModes, - Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): c.options.Capacity, - }, - PersistentVolumeSource: api.PersistentVolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ - PDName: volume.ProvisionedVolumeName, - FSType: "ext4", - Partition: 0, - ReadOnly: false, - }, - }, - }, - }, nil + return pv, nil } diff --git a/pkg/volume/gce_pd/gce_pd_test.go b/pkg/volume/gce_pd/gce_pd_test.go index 4987f2bd665..9c2bf7e5ffe 100644 --- a/pkg/volume/gce_pd/gce_pd_test.go +++ b/pkg/volume/gce_pd/gce_pd_test.go @@ -216,14 +216,7 @@ func TestPlugin(t *testing.T) { PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{}) - persistentSpec, err := provisioner.NewPersistentVolumeTemplate() - if err != nil { - t.Errorf("NewPersistentVolumeTemplate() failed: %v", err) - } - - // get 2nd Provisioner - persistent volume controller will do the same - provisioner, err = plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{}) - err = provisioner.Provision(persistentSpec) + persistentSpec, err := provisioner.Provision() if err != nil { t.Errorf("Provision() failed: %v", err) } diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go index b0ddb0aa020..ff2d9c667ff 100644 --- a/pkg/volume/host_path/host_path.go +++ b/pkg/volume/host_path/host_path.go @@ -252,18 +252,12 @@ type hostPathProvisioner struct { // Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume. // This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster. 
-func (r *hostPathProvisioner) Provision(pv *api.PersistentVolume) error { - if pv.Spec.HostPath == nil { - return fmt.Errorf("pv.Spec.HostPath cannot be nil") - } - return os.MkdirAll(pv.Spec.HostPath.Path, 0750) -} - -func (r *hostPathProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { +func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) { fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID()) - return &api.PersistentVolume{ + + pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ - GenerateName: "pv-hostpath-", + Name: r.options.PVName, Annotations: map[string]string{ "kubernetes.io/createdby": "hostpath-dynamic-provisioner", }, @@ -280,7 +274,9 @@ func (r *hostPathProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolu }, }, }, - }, nil + } + + return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750) } // hostPathDeleter deletes a hostPath PV from the cluster. diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index 440f4cfdeee..6a6e9e2f3bb 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -163,7 +163,7 @@ func TestProvisioner(t *testing.T) { if err != nil { t.Errorf("Failed to make a new Provisioner: %v", err) } - pv, err := creater.NewPersistentVolumeTemplate() + pv, err := creater.Provision() if err != nil { t.Errorf("Unexpected error creating volume: %v", err) } diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index e7ecb96f351..425c148550c 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -340,11 +340,12 @@ type FakeProvisioner struct { Host VolumeHost } -func (fc *FakeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) { +func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) { fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID()) - return &api.PersistentVolume{ + + pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ - GenerateName: "pv-fakeplugin-", + Name: fc.Options.PVName, Annotations: map[string]string{ "kubernetes.io/createdby": "fakeplugin-provisioner", }, @@ -361,11 +362,9 @@ func (fc *FakeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, }, }, }, - }, nil -} + } -func (fc *FakeProvisioner) Provision(pv *api.PersistentVolume) error { - return nil + return pv, nil } // FindEmptyDirectoryUsageOnTmpfs finds the expected usage of an empty directory existing on diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index 88a4e6d871e..9a609e18bb0 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -111,13 +111,10 @@ type Recycler interface { // Provisioner is an interface that creates templates for PersistentVolumes and can create the volume // as a new resource in the infrastructure provider. type Provisioner interface { - // Provision creates the resource by allocating the underlying volume in a storage system. - // This method should block until completion. - Provision(*api.PersistentVolume) error - // NewPersistentVolumeTemplate creates a new PersistentVolume to be used as a template before saving. - // The provisioner will want to tweak its properties, assign correct annotations, etc. - // This func should *NOT* persist the PV in the API. That is left to the caller. - NewPersistentVolumeTemplate() (*api.PersistentVolume, error) + // Provision creates the resource by allocating the underlying volume in a + // storage system. 
This method should block until completion and returns + // PersistentVolume representing the created storage resource. + Provision() (*api.PersistentVolume, error) } // Deleter removes the resource from the underlying storage provider. Calls to this method should block until From 514d5958818222b89066d2fe44528d3c12f7d318 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:25 +0200 Subject: [PATCH 24/34] provisioning: Implement provisioner --- .../app/controllermanager.go | 1 + .../controllermanager/controllermanager.go | 1 + .../persistentvolume_controller.go | 216 ++++++++++++++++-- .../persistentvolume_framework_test.go | 4 + 4 files changed, 197 insertions(+), 25 deletions(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 0f5c46b5d77..1a2bf5ca1c7 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -384,6 +384,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig provisioner, ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, + s.ClusterName, ) volumeController.Run() time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index 46d9c48ea9e..36ee2e6546d 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -282,6 +282,7 @@ func (s *CMServer) Run(_ []string) error { provisioner, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, + s.ClusterName, ) volumeController.Run() diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 9e3f15ddcff..6482597ee14 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -80,6 +80,30 @@ const annBoundByController = "pv.kubernetes.io/bound-by-controller" // Value of this annotation should be empty. const annClass = "volume.alpha.kubernetes.io/storage-class" +// This annotation is added to a PV that has been dynamically provisioned by +// Kubernetes. It's value is name of volume plugin that created the volume. +// It serves both user (to show where a PV comes from) and Kubernetes (to +// recognize dynamically provisioned PVs in its decissions). +const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + +// Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) +// with namespace of a persistent volume claim used to create this volume. +const cloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace" + +// Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) +// with name of a persistent volume claim used to create this volume. +const cloudVolumeCreatedForClaimNameTag = "kubernetes.io/created-for/pvc/name" + +// Name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) +// with name of appropriate Kubernetes persistent volume . +const cloudVolumeCreatedForVolumeNameTag = "kubernetes.io/created-for/pv/name" + +// Number of retries when we create a PV object for a provisioned volume. +const createProvisionedPVRetryCount = 5 + +// Interval between retries when we create a PV object for a provisioned volume. 
+const createProvisionedPVInterval = 10 * time.Second + // PersistentVolumeController is a controller that synchronizes // PersistentVolumeClaims and PersistentVolumes. It starts two // framework.Controllers that watch PerstentVolume and PersistentVolumeClaim @@ -95,6 +119,8 @@ type PersistentVolumeController struct { eventRecorder record.EventRecorder cloud cloudprovider.Interface recyclePluginMgr vol.VolumePluginMgr + provisioner vol.ProvisionableVolumePlugin + clusterName string // PersistentVolumeController keeps track of long running operations and // makes sure it won't start the same operation twice in parallel. @@ -112,6 +138,9 @@ type PersistentVolumeController struct { // For testing only: hook to call before an asynchronous operation starts. // Not used when set to nil. preOperationHook func(operationName string, operationArgument interface{}) + + createProvisionedPVRetryCount int + createProvisionedPVInterval time.Duration } // NewPersistentVolumeController creates a new PersistentVolumeController @@ -120,19 +149,29 @@ func NewPersistentVolumeController( syncPeriod time.Duration, provisioner vol.ProvisionableVolumePlugin, recyclers []vol.VolumePlugin, - cloud cloudprovider.Interface) *PersistentVolumeController { + cloud cloudprovider.Interface, + clusterName string) *PersistentVolumeController { broadcaster := record.NewBroadcaster() broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) controller := &PersistentVolumeController{ - kubeClient: kubeClient, - eventRecorder: recorder, - runningOperations: make(map[string]bool), - cloud: cloud, + kubeClient: kubeClient, + eventRecorder: recorder, + runningOperations: make(map[string]bool), + cloud: cloud, + provisioner: provisioner, + clusterName: clusterName, + createProvisionedPVRetryCount: createProvisionedPVRetryCount, + createProvisionedPVInterval: createProvisionedPVInterval, } controller.recyclePluginMgr.InitPlugins(recyclers, controller) + if controller.provisioner != nil { + if err := controller.provisioner.Init(controller); err != nil { + glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) + } + } volumeSource := &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { @@ -376,25 +415,10 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo // No PV could be found // OBSERVATION: pvc is "Pending", will retry if hasAnnotation(claim.ObjectMeta, annClass) { - // TODO: provisioning - //plugin := findProvisionerPluginForPV(pv) // Need to flesh this out - //if plugin != nil { - //FIXME: left off here - // No match was found and provisioning was requested. - // - // maintain a map with the current provisioner goroutines that are running - // if the key is already present in the map, return - // - // launch the goroutine that: - // 1. calls plugin.Provision to make the storage asset - // 2. gets back a PV object (partially filled) - // 3. create the PV API object, with claimRef -> pvc - // 4. deletes itself from the map when it's done - // return - //} else { - // make an event calling out that no provisioner was configured - // return, try later? 
- //} + if err = ctrl.provisionClaim(claim); err != nil { + return err + } + return nil } // Mark the claim as Pending and try to find a match in the next // periodic syncClaim @@ -1036,7 +1060,6 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %+v", arg) return } - glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) // This method may have been waiting for a volume lock for some time. @@ -1241,6 +1264,149 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVol return nil } +// provisionClaim starts new asynchronous operation to provision a claim. +func (ctrl *PersistentVolumeController) provisionClaim(claim *api.PersistentVolumeClaim) error { + glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) + ctrl.scheduleOperation("provision-"+string(claim.UID), ctrl.provisionClaimOperation, claim) + return nil +} + +// provisionClaimOperation provisions a volume. This method is running in +// standalone goroutine and already has all necessary locks. +func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) { + claim, ok := claimObj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %+v", claimObj) + return + } + glog.V(4).Infof("provisionClaimOperation [%s] started", claimToClaimKey(claim)) + + // A previous doProvisionClaim may just have finished while we were waiting for + // the locks. Check that PV (with deterministic name) hasn't been provisioned + // yet. + + pvName := ctrl.getProvisionedVolumeNameForClaim(claim) + volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName) + if err == nil && volume != nil { + // Volume has been already provisioned, nothing to do. + glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) + return + } + + // Prepare a claimRef to the claim early (to fail before a volume is + // provisioned) + claimRef, err := api.GetReference(claim) + if err != nil { + glog.V(3).Infof("unexpected error getting claim reference: %v", err) + return + } + + // TODO: find provisionable plugin based on a class/profile + plugin := ctrl.provisioner + if plugin == nil { + // No provisioner found. Emit an event. + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", "No provisioner plugin found for the claim!") + glog.V(2).Infof("no provisioner plugin found for claim %s!", claimToClaimKey(claim)) + // The controller will retry provisioning the volume in every + // syncVolume() call. 
+ return + } + + // Gather provisioning options + tags := make(map[string]string) + tags[cloudVolumeCreatedForClaimNamespaceTag] = claim.Namespace + tags[cloudVolumeCreatedForClaimNameTag] = claim.Name + tags[cloudVolumeCreatedForVolumeNameTag] = pvName + + options := vol.VolumeOptions{ + Capacity: claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)], + AccessModes: claim.Spec.AccessModes, + PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, + CloudTags: &tags, + ClusterName: ctrl.clusterName, + PVName: pvName, + } + + // Provision the volume + provisioner, err := plugin.NewProvisioner(options) + if err != nil { + strerr := fmt.Sprintf("Failed to create provisioner: %v", err) + glog.V(2).Infof("failed to create provisioner for claim %q: %v", claimToClaimKey(claim), err) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) + return + } + + volume, err = provisioner.Provision() + if err != nil { + strerr := fmt.Sprintf("Failed to provision volume: %v", err) + glog.V(2).Infof("failed to provision volume for claim %q: %v", claimToClaimKey(claim), err) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) + return + } + + glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) + + // Create Kubernetes PV object for the volume. + volume.Name = pvName + // Bind it to the claim + volume.Spec.ClaimRef = claimRef + volume.Status.Phase = api.VolumeBound + + // Add annBoundByController (used in deleting the volume) + setAnnotation(&volume.ObjectMeta, annBoundByController, "yes") + setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.Name()) + + // Try to create the PV object several times + for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { + glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) + if _, err = ctrl.kubeClient.Core().PersistentVolumes().Create(volume); err == nil { + // Save succeeded. + glog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) + break + } + // Save failed, try again after a while. + glog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) + time.Sleep(ctrl.createProvisionedPVInterval) + } + + if err != nil { + // Save failed. Now we have a storage asset outside of Kubernetes, + // but we don't have appropriate PV object for it. + // Emit some event here and try to delete the storage asset several + // times. + strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err) + glog.V(3).Info(strerr) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr) + + for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { + if err = ctrl.doDeleteVolume(volume); err == nil { + // Delete succeeded + glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) + break + } + // Delete failed, try again after a while. + glog.V(3).Infof("failed to delete volume %q: %v", volume.Name, i, err) + time.Sleep(ctrl.createProvisionedPVInterval) + } + + if err != nil { + // Delete failed several times. There is orphaned volume and there + // is nothing we can do about it. + strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. 
Please delete manually.", claimToClaimKey(claim), err) + glog.V(2).Info(strerr) + ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningCleanupFailed", strerr) + } + } else { + glog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) + } +} + +// getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume. +// The name must be unique +func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *api.PersistentVolumeClaim) string { + return "pv-provisioned-for-" + string(claim.UID) +} + // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) { diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index fda289b6b33..daca2f085c8 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -424,6 +424,10 @@ func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVo kubeClient: kubeClient, eventRecorder: record.NewFakeRecorder(1000), runningOperations: make(map[string]bool), + + // Speed up the testing + createProvisionedPVRetryCount: createProvisionedPVRetryCount, + createProvisionedPVInterval: 5 * time.Millisecond, } return ctrl } From 9fb0f7a3fdfa5825bc82e204ed6a3449b3b38ed1 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:26 +0200 Subject: [PATCH 25/34] provisioning: Unit tests --- .../persistentvolume_framework_test.go | 59 ++++++-- .../persistentvolume_provision_test.go | 137 ++++++++++++++++++ 2 files changed, 180 insertions(+), 16 deletions(-) create mode 100644 pkg/controller/persistentvolume/persistentvolume_provision_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index daca2f085c8..8558d89fcc0 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -41,7 +41,6 @@ import ( "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/diff" vol "k8s.io/kubernetes/pkg/volume" ) @@ -91,6 +90,7 @@ type controllerTest struct { type testCall func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error const testNamespace = "default" +const mockPluginName = "MockVolumePlugin" var versionConflictError = errors.New("VersionError") var novolumes []*api.PersistentVolume @@ -135,6 +135,26 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) switch { + case action.Matches("create", "persistentvolumes"): + obj := action.(core.UpdateAction).GetObject() + volume := obj.(*api.PersistentVolume) + + // check the volume does not exist + _, found := r.volumes[volume.Name] + if found { + return true, nil, fmt.Errorf("Cannot create volume %s: volume already exists", volume.Name) + } + + // Store the updated object to appropriate places. 
+ if r.volumeSource != nil { + r.volumeSource.Add(volume) + } + r.volumes[volume.Name] = volume + r.changedObjects = append(r.changedObjects, volume) + r.changedSinceLastSync++ + glog.V(4).Infof("created volume %s", volume.Name) + return true, volume, nil + case action.Matches("update", "persistentvolumes"): obj := action.(core.UpdateAction).GetObject() volume := obj.(*api.PersistentVolume) @@ -446,6 +466,13 @@ func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []err ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl) } +func addProvisionPlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) { + plugin := &mockVolumePlugin{ + provisionCalls: expectedDeleteCalls, + } + ctrl.provisioner = plugin +} + // newVolume returns a new volume with given attributes func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume { volume := api.PersistentVolume{ @@ -481,7 +508,11 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a if len(annotations) > 0 { volume.Annotations = make(map[string]string) for _, a := range annotations { - volume.Annotations[a] = "yes" + if a != annDynamicallyProvisioned { + volume.Annotations[a] = "yes" + } else { + volume.Annotations[a] = mockPluginName + } } } @@ -559,10 +590,11 @@ type operationType string const operationDelete = "Delete" const operationRecycle = "Recycle" +const operationProvision = "Provision" // wrapTestWithControllerConfig returns a testCall that: -// - configures controller with recycler or deleter which will return provided -// errors when a volume is deleted or recycled. +// - configures controller with recycler, deleter or provisioner which will +// return provided errors when a volume is deleted, recycled or provisioned // - calls given testCall func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { expected := expectedOperationCalls @@ -573,6 +605,8 @@ func wrapTestWithControllerConfig(operation operationType, expectedOperationCall addDeletePlugin(ctrl, expected) case operationRecycle: addRecyclePlugin(ctrl, expected) + case operationProvision: + addProvisionPlugin(ctrl, expected) } return toWrap(ctrl, reactor, test) @@ -798,7 +832,7 @@ func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error { } func (plugin *mockVolumePlugin) Name() string { - return "mockVolumePlugin" + return mockPluginName } func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool { @@ -834,26 +868,19 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { var pv *api.PersistentVolume err := plugin.provisionCalls[plugin.provisionCallCounter] if err == nil { - // Create a fake PV - fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID()) - + // Create a fake PV with known GCE volume (to match expected volume) pv = &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: plugin.provisionOptions.PVName, - Annotations: map[string]string{ - "kubernetes.io/createdby": "hostpath-dynamic-provisioner", - }, }, Spec: api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy, - AccessModes: plugin.provisionOptions.AccessModes, Capacity: api.ResourceList{ api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity, }, + AccessModes: plugin.provisionOptions.AccessModes, + 
PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy, PersistentVolumeSource: api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{ - Path: fullpath, - }, + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, }, }, } diff --git a/pkg/controller/persistentvolume/persistentvolume_provision_test.go b/pkg/controller/persistentvolume/persistentvolume_provision_test.go new file mode 100644 index 00000000000..5655c4e6294 --- /dev/null +++ b/pkg/controller/persistentvolume/persistentvolume_provision_test.go @@ -0,0 +1,137 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncVolume, expecting provisioning to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncVolume *once*. +// 3. Compare resulting volumes with expected volumes. +func TestProvisionSync(t *testing.T) { + tests := []controllerTest{ + { + // Provision a volume + "11-1 - successful provision", + novolumes, + newVolumeArray("pv-provisioned-for-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), + noevents, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + { + // Provision failure - plugin not found + "11-2 - plugin not found", + novolumes, + novolumes, + newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + testSyncClaim, + }, + { + // Provision failure - newProvisioner returns error + "11-3 - newProvisioner failure", + novolumes, + novolumes, + newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), + }, + { + // Provision failure - Provision returns error + "11-4 - provision failure", + novolumes, + novolumes, + newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim), + }, + { + // Provision success - there is already a volume available, still + // we provision a new one when requested. 
+ "11-6 - provisioning when there is a volume available", + newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + []*api.PersistentVolume{ + newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("pv-provisioned-for-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + }, + newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), + noevents, + // No provisioning plugin confingure - makes the test fail when + // the controller errorneously tries to provision something + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + /* { + // Provision success? - claim is bound before provisioner creates + // a volume. + "11-7 - claim is bound before provisioning", + novolumes, + novolumes, + []*api.PersistentVolumeClaim{ + newClaim("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), + }, + []*api.PersistentVolumeClaim{ + newClaim("claim11-7", "uid11-7", "1Gi", "volume11-7", api.ClaimBound, annClass, annBindCompleted), + }, + []string{"Warning ProvisioningFailed"}, + getSyncClaimWithOperation(operationProvision, []error{errors.New("Moc provisioner error")}), + }, */ + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls in enforced to prevent endless loops. +func TestProvisionMultiSync(t *testing.T) { + tests := []controllerTest{ + { + // Provision a volume with binding + "12-1 - successful provision", + novolumes, + newVolumeArray("pv-provisioned-for-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim12-1", "uid12-1", "1Gi", "pv-provisioned-for-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), + noevents, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + } + + runMultisyncTests(t, tests) +} From 92dc159ab6c18137e12fbbc6d1c9cc5a5033d9a4 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:27 +0200 Subject: [PATCH 26/34] Delete provisioned volumes that are not needed. We should delete volumes that are provisioned for a claim and the claim gets bound to different volume during the provisioning. 
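Condensed, the new branch in syncVolume below boils down to the following
sketch (logging omitted and the annBoundByController sub-case collapsed; both
non-provisioned sub-cases end up in unbindVolume):

    // volume.Spec.ClaimRef points to a claim that is bound to another volume.
    if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) &&
        volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimDelete {
        // We provisioned this volume for the claim, but the claim was
        // satisfied elsewhere; the storage asset is unused, so reclaim
        // (i.e. delete) it.
        return ctrl.reclaimVolume(volume)
    }
    // Statically created volumes keep the previous behavior: unbind them and
    // either leave them pre-bound or make them Available again.
    return ctrl.unbindVolume(volume)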
--- .../persistentvolume_controller.go | 42 ++++++++++++------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 6482597ee14..956feced3c0 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -672,25 +672,39 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) return nil } else { // Volume is bound to a claim, but the claim is bound elsewhere - if hasAnnotation(volume.ObjectMeta, annBoundByController) { - // This is part of the normal operation of the controller; the - // controller tried to use this volume for a claim but the claim - // was fulfilled by another volume. We did this; fix it. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) - if err = ctrl.unbindVolume(volume); err != nil { + if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) && volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimDelete { + // This volume was dynamically provisioned for this claim. The + // claim got bound elsewhere, and thus this volume is not + // needed. Delete it. + if err = ctrl.reclaimVolume(volume); err != nil { + // Deletion failed, we will fall back into the same condition + // in the next call to this method return err } return nil } else { - // The PV must have been created with this ptr; leave it alone. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) - // This just updates the volume phase and clears - // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound - // to the claim. - if err = ctrl.unbindVolume(volume); err != nil { - return err + // Volume is bound to a claim, but the claim is bound elsewhere + // and it's not dynamically provisioned. + if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // This is part of the normal operation of the controller; the + // controller tried to use this volume for a claim but the claim + // was fulfilled by another volume. We did this; fix it. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil + } else { + // The PV must have been created with this ptr; leave it alone. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + // This just updates the volume phase and clears + // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound + // to the claim. + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil } - return nil } } } From c24b33793c8fc383ba6c6f556350f9b18720ef04 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:28 +0200 Subject: [PATCH 27/34] unit test: Add possibility to inject kubeclient errors. 
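A test opts in through the new errors field of controllerTest (or by passing a
slice directly to newVolumeReactor). As a hypothetical example, to make the
simulated API server fail the first update of a PersistentVolume and behave
normally afterwards:

    // Each injected error is matched by verb and resource and consumed
    // exactly once; unmatched actions are handled by the reactor as usual.
    errs := []reactorError{
        {verb: "update", resource: "persistentvolumes", error: errors.New("mock etcd error")},
    }
    reactor := newVolumeReactor(client, ctrl, nil, nil, errs)

Existing tests simply pass noerrors, which is what most of the churn below is
about.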
--- .../persistentvolume_controller_test.go | 10 +-- .../persistentvolume_delete_test.go | 18 ++--- .../persistentvolume_framework_test.go | 53 ++++++++++++++- .../persistentvolume_provision_test.go | 10 +-- .../persistentvolume_recycle_test.go | 22 +++--- .../persistentvolume_sync_test.go | 68 +++++++++---------- 6 files changed, 114 insertions(+), 67 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_controller_test.go index 67a103d06ac..552874f331f 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller_test.go @@ -48,7 +48,7 @@ func TestControllerSync(t *testing.T) { newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), - noevents, + noevents, noerrors, // Custom test function that generates an add event func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain) @@ -64,7 +64,7 @@ func TestControllerSync(t *testing.T) { newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), noclaims, /* added in testAddClaim5_2 */ newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, + noevents, noerrors, // Custom test function that generates an add event func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending) @@ -80,7 +80,7 @@ func TestControllerSync(t *testing.T) { newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted), noclaims, - noevents, + noevents, noerrors, // Custom test function that generates a delete event func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { obj := ctrl.claims.List()[0] @@ -103,7 +103,7 @@ func TestControllerSync(t *testing.T) { novolumes, newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted), - []string{"Warning ClaimLost"}, + []string{"Warning ClaimLost"}, noerrors, // Custom test function that generates a delete event func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { obj := ctrl.volumes.store.List()[0] @@ -130,7 +130,7 @@ func TestControllerSync(t *testing.T) { claimSource := framework.NewFakeControllerSource() ctrl := newPersistentVolumeController(client) ctrl.initializeController(time.Minute, volumeSource, claimSource) - reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource) + reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) reactor.claims[claim.Name] = claim diff --git a/pkg/controller/persistentvolume/persistentvolume_delete_test.go 
b/pkg/controller/persistentvolume/persistentvolume_delete_test.go index c1a3dee9558..03ef84b94d4 100644 --- a/pkg/controller/persistentvolume/persistentvolume_delete_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_delete_test.go @@ -36,7 +36,7 @@ func TestDeleteSync(t *testing.T) { novolumes, noclaims, noclaims, - noevents, + noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), @@ -48,7 +48,7 @@ func TestDeleteSync(t *testing.T) { novolumes, noclaims, noclaims, - noevents, + noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), @@ -60,7 +60,7 @@ func TestDeleteSync(t *testing.T) { newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete), noclaims, noclaims, - []string{"Warning VolumeFailedDelete"}, testSyncVolume, + []string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume, }, { // delete failure - newDeleter returns error @@ -69,7 +69,7 @@ func TestDeleteSync(t *testing.T) { newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete), noclaims, noclaims, - []string{"Warning VolumeFailedDelete"}, + []string{"Warning VolumeFailedDelete"}, noerrors, wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), }, { @@ -79,7 +79,7 @@ func TestDeleteSync(t *testing.T) { newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete), noclaims, noclaims, - []string{"Warning VolumeFailedDelete"}, + []string{"Warning VolumeFailedDelete"}, noerrors, wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), }, { @@ -89,7 +89,7 @@ func TestDeleteSync(t *testing.T) { novolumes, noclaims, noclaims, - noevents, + noevents, noerrors, wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before delete operation starts reactor.lock.Lock() @@ -106,7 +106,7 @@ func TestDeleteSync(t *testing.T) { newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), noclaims, newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), - noevents, + noevents, noerrors, wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { reactor.lock.Lock() defer reactor.lock.Unlock() @@ -127,7 +127,7 @@ func TestDeleteSync(t *testing.T) { novolumes, newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), - noevents, + noevents, noerrors, // Inject deleter into the controller and call syncVolume. The // deleter simulates one delete() call that succeeds. 
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), @@ -160,7 +160,7 @@ func TestDeleteMultiSync(t *testing.T) { novolumes, noclaims, noclaims, - []string{"Warning VolumeFailedDelete"}, + []string{"Warning VolumeFailedDelete"}, noerrors, wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), }, } diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index 8558d89fcc0..b1f798d0f09 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -83,6 +83,8 @@ type controllerTest struct { // Expected events - any event with prefix will pass, we don't check full // event message. expectedEvents []string + // Errors to produce on matching action + errors []reactorError // Function to call as the test. test testCall } @@ -96,6 +98,7 @@ var versionConflictError = errors.New("VersionError") var novolumes []*api.PersistentVolume var noclaims []*api.PersistentVolumeClaim var noevents = []string{} +var noerrors = []reactorError{} // volumeReactor is a core.Reactor that simulates etcd and API server. It // stores: @@ -108,6 +111,11 @@ var noevents = []string{} // - Optionally, volume and claim event sources. When set, all changed // volumes/claims are sent as Modify event to these sources. These sources can // be linked back to the controller watcher as "volume/claim updated" events. +// - Optionally, list of error that should be returned by reactor, simulating +// etcd / API server failures. These errors are evaluated in order and every +// error is returned only once. I.e. when the reactor finds matching +// reactorError, it return appropriate error and removes the reactorError from +// the list. type volumeReactor struct { volumes map[string]*api.PersistentVolume claims map[string]*api.PersistentVolumeClaim @@ -117,6 +125,17 @@ type volumeReactor struct { volumeSource *framework.FakeControllerSource claimSource *framework.FakeControllerSource lock sync.Mutex + errors []reactorError +} + +// reactorError is an error that is returned by test reactor (=simulated +// etcd+/API server) when an action performed by the reactor matches given verb +// ("get", "update", "create", "delete" or "*"") on given resource +// ("persistentvolumes", "persistentvolumeclaims" or "*"). +type reactorError struct { + verb string + resource string + error error } // React is a callback called by fake kubeClient from the controller. @@ -134,6 +153,13 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) + // Inject error when requested + err = r.injectReactError(action) + if err != nil { + return true, nil, err + } + + // Test did not requst to inject an error, continue simulating API server. switch { case action.Matches("create", "persistentvolumes"): obj := action.(core.UpdateAction).GetObject() @@ -246,6 +272,26 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return false, nil, nil } +// injectReactError returns an error when the test requested given action to +// fail. nil is returned otherwise. +func (r *volumeReactor) injectReactError(action core.Action) error { + if len(r.errors) == 0 { + // No more errors to inject, everything should succeed. 
+ return nil + } + + for i, expected := range r.errors { + glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource()) + if action.Matches(expected.verb, expected.resource) { + // That's the action we're waiting for, remove it from injectedErrors + r.errors = append(r.errors[:i], r.errors[i+1:]...) + glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error) + return expected.error + } + } + return nil +} + // checkVolumes compares all expectedVolumes with set of volumes at the end of // the test and reports differences. func (r *volumeReactor) checkVolumes(t *testing.T, expectedVolumes []*api.PersistentVolume) error { @@ -425,13 +471,14 @@ func (r *volumeReactor) waitTest() { } } -func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource, claimSource *framework.FakeControllerSource) *volumeReactor { +func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource, claimSource *framework.FakeControllerSource, errors []reactorError) *volumeReactor { reactor := &volumeReactor{ volumes: make(map[string]*api.PersistentVolume), claims: make(map[string]*api.PersistentVolumeClaim), ctrl: ctrl, volumeSource: volumeSource, claimSource: claimSource, + errors: errors, } client.AddReactor("*", "*", reactor.React) return reactor @@ -677,7 +724,7 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} ctrl := newPersistentVolumeController(client) - reactor := newVolumeReactor(client, ctrl, nil, nil) + reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) reactor.claims[claim.Name] = claim @@ -721,7 +768,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} ctrl := newPersistentVolumeController(client) - reactor := newVolumeReactor(client, ctrl, nil, nil) + reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) reactor.claims[claim.Name] = claim diff --git a/pkg/controller/persistentvolume/persistentvolume_provision_test.go b/pkg/controller/persistentvolume/persistentvolume_provision_test.go index 5655c4e6294..ecfa165d08b 100644 --- a/pkg/controller/persistentvolume/persistentvolume_provision_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_provision_test.go @@ -37,7 +37,7 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), - noevents, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), }, { // Provision failure - plugin not found @@ -46,7 +46,7 @@ func TestProvisionSync(t *testing.T) { novolumes, newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass), - []string{"Warning ProvisioningFailed"}, + []string{"Warning ProvisioningFailed"}, noerrors, testSyncClaim, }, { @@ -56,7 +56,7 @@ func TestProvisionSync(t *testing.T) { novolumes, newClaimArray("claim11-3", "uid11-3", "1Gi", "", 
api.ClaimPending, annClass), newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass), - []string{"Warning ProvisioningFailed"}, + []string{"Warning ProvisioningFailed"}, noerrors, wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), }, { @@ -66,7 +66,7 @@ func TestProvisionSync(t *testing.T) { novolumes, newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass), - []string{"Warning ProvisioningFailed"}, + []string{"Warning ProvisioningFailed"}, noerrors, wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim), }, { @@ -129,7 +129,7 @@ func TestProvisionMultiSync(t *testing.T) { newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim12-1", "uid12-1", "1Gi", "pv-provisioned-for-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), - noevents, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), }, } diff --git a/pkg/controller/persistentvolume/persistentvolume_recycle_test.go b/pkg/controller/persistentvolume/persistentvolume_recycle_test.go index acac9436a23..14203b04c41 100644 --- a/pkg/controller/persistentvolume/persistentvolume_recycle_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_recycle_test.go @@ -36,7 +36,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-1", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - noevents, + noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), @@ -48,7 +48,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-2", "1Gi", "", "claim6-2", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - noevents, + noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. 
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), @@ -60,7 +60,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - []string{"Warning VolumeFailedRecycle"}, testSyncVolume, + []string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume, }, { // recycle failure - newRecycler returns error @@ -69,7 +69,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - []string{"Warning VolumeFailedRecycle"}, + []string{"Warning VolumeFailedRecycle"}, noerrors, wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), }, { @@ -79,7 +79,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - []string{"Warning VolumeFailedRecycle"}, + []string{"Warning VolumeFailedRecycle"}, noerrors, wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), }, { @@ -89,7 +89,7 @@ func TestRecycleSync(t *testing.T) { novolumes, noclaims, noclaims, - noevents, + noevents, noerrors, wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Delete the volume before recycle operation starts reactor.lock.Lock() @@ -106,7 +106,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-7", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - noevents, + noevents, noerrors, wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() @@ -127,7 +127,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-8", "1Gi", "", "claim6-8", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - noevents, + noevents, noerrors, wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { // Mark the volume as Available before the recycler starts reactor.lock.Lock() @@ -145,7 +145,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-9", "1Gi", "", "claim6-9", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), - noevents, + noevents, noerrors, // Inject recycler into the controller and call syncVolume. The // recycler simulates one recycle() call that succeeds. 
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), @@ -157,7 +157,7 @@ func TestRecycleSync(t *testing.T) { newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeFailed, "Unknown"), noclaims, noclaims, - []string{"Warning VolumeUnknownReclaimPolicy"}, testSyncVolume, + []string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume, }, } runSyncTests(t, tests) @@ -187,7 +187,7 @@ func TestRecycleMultiSync(t *testing.T) { newVolumeArray("volume7-1", "1Gi", "", "claim7-1", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), noclaims, noclaims, - []string{"Warning VolumeFailedRecycle"}, + []string{"Warning VolumeFailedRecycle"}, noerrors, wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume), }, } diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/persistentvolume_sync_test.go index 8cdb5442b8e..b981c17d919 100644 --- a/pkg/controller/persistentvolume/persistentvolume_sync_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_sync_test.go @@ -39,7 +39,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim does not do anything when there is no matching volume. @@ -48,7 +48,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim resets claim.Status to Pending when there is no @@ -58,7 +58,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim binds claims to the smallest matching volume @@ -73,7 +73,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim binds a claim only to volume that points to it (by @@ -89,7 +89,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim binds a claim only to volume that points to it (by @@ -105,7 +105,7 @@ func TestSync(t *testing.T) { }, newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim does not bind claim to a volume prebound to a claim with @@ 
-115,7 +115,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -125,7 +125,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -135,7 +135,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim completes binding - simulates controller crash after @@ -145,7 +145,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, // [Unit test set 2] User asked for a specific PV. // Test the binding when pv.ClaimRef is already set by controller or @@ -157,7 +157,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that does not exist. 
@@ -167,7 +167,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound), newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that exists and is @@ -177,7 +177,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending), newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // claim with claim pre-bound to a PV that is pre-bound to the claim @@ -187,7 +187,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending), newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that is pre-bound to the @@ -198,7 +198,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending), newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim pre-bound to a PV that is bound to different @@ -208,7 +208,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound), newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim bound by controller to a PV that is bound to @@ -218,7 +218,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), - noevents, testSyncClaimError, + noevents, noerrors, testSyncClaimError, }, // [Unit test set 3] Syncing bound claim { @@ -229,7 +229,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted), - []string{"Warning ClaimLost"}, testSyncClaim, + []string{"Warning ClaimLost"}, noerrors, testSyncClaim, }, { // syncClaim with claim bound to non-exising volume. 
Check it's @@ -239,7 +239,7 @@ func TestSync(t *testing.T) { novolumes, newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted), - []string{"Warning ClaimLost"}, testSyncClaim, + []string{"Warning ClaimLost"}, noerrors, testSyncClaim, }, { // syncClaim with claim bound to unbound volume. Check it's bound. @@ -249,7 +249,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim bound to volume with missing (or different) @@ -259,7 +259,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), - []string{"Warning ClaimMisbound"}, testSyncClaim, + []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, }, { // syncClaim with claim bound to bound volume. Check that the @@ -270,7 +270,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // syncClaim with claim bound to a volume that is bound to different @@ -281,7 +281,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), - []string{"Warning ClaimMisbound"}, testSyncClaim, + []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, }, // [Unit test set 4] All syncVolume tests. { @@ -291,7 +291,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), noclaims, noclaims, - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with prebound pending volume. Check it's marked as @@ -301,7 +301,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), noclaims, noclaims, - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound to missing claim. @@ -311,7 +311,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain), noclaims, noclaims, - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound to claim with different UID. 
@@ -321,7 +321,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound by controller to unbound claim. @@ -331,7 +331,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound by user to unbound claim. @@ -341,7 +341,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound to bound claim. @@ -351,7 +351,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound by controller to claim bound to @@ -361,7 +361,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, { // syncVolume with volume bound by user to claim bound to @@ -372,7 +372,7 @@ func TestSync(t *testing.T) { newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), - noevents, testSyncVolume, + noevents, noerrors, testSyncVolume, }, } runSyncTests(t, tests) @@ -402,7 +402,7 @@ func TestMultiSync(t *testing.T) { newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), - noevents, testSyncClaim, + noevents, noerrors, testSyncClaim, }, { // Two controllers bound two PVs to single claim. 
Test one of them
@@ -418,7 +418,7 @@ func TestMultiSync(t *testing.T) {
 			},
 			newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
 			newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
-			noevents, testSyncClaim,
+			noevents, noerrors, testSyncClaim,
 		},
 	}

From c6f05c8056ba9ab95ecbefb67941d1e4b1843bba Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Tue, 17 May 2016 14:55:29 +0200
Subject: [PATCH 28/34] provisioning: Add unit tests for provisioning errors.

---
 .../persistentvolume_provision_test.go        | 147 ++++++++++++++++--
 1 file changed, 133 insertions(+), 14 deletions(-)

diff --git a/pkg/controller/persistentvolume/persistentvolume_provision_test.go b/pkg/controller/persistentvolume/persistentvolume_provision_test.go
index ecfa165d08b..92eb222d680 100644
--- a/pkg/controller/persistentvolume/persistentvolume_provision_test.go
+++ b/pkg/controller/persistentvolume/persistentvolume_provision_test.go
@@ -81,26 +81,145 @@ func TestProvisionSync(t *testing.T) {
 			newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
 			// Binding will be completed in the next syncClaim
 			newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
-			noevents,
+			noevents, noerrors,
 			// No provisioning plugin confingure - makes the test fail when
 			// the controller errorneously tries to provision something
 			wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
 		},
-		/* {
-			// Provision success? - claim is bound before provisioner creates
-			// a volume.
-			"11-7 - claim is bound before provisioning",
-			novolumes,
-			novolumes,
-			[]*api.PersistentVolumeClaim{
-				newClaim("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
+		{
+			// Provision success? - claim is bound before provisioner creates
+			// a volume.
+			"11-7 - claim is bound before provisioning",
+			novolumes,
+			newVolumeArray("pv-provisioned-for-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
+			newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
+			// The claim would be bound in the next syncClaim
+			newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
+			noevents, noerrors,
+			wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
+				// Create a volume before provisionClaimOperation starts.
+				// This simulates a parallel controller provisioning the volume.
+ reactor.lock.Lock() + volume := newVolume("pv-provisioned-for-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) + reactor.volumes[volume.Name] = volume + reactor.lock.Unlock() + }), + }, + { + // Provision success - cannot save provisioned PV once, + // second retry succeeds + "11-8 - cannot save provisioned volume", + novolumes, + newVolumeArray("pv-provisioned-for-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), + noevents, + []reactorError{ + // Inject error to the first + // kubeclient.PersistentVolumes.Create() call. All other calls + // will succeed. + {"create", "persistentvolumes", errors.New("Mock creation error")}, + }, + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + { + // Provision success? - cannot save provisioned PV five times, + // volume is deleted and delete succeeds + "11-9 - cannot save provisioned volume, delete succeeds", + novolumes, + novolumes, + newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + []reactorError{ + // Inject error to five kubeclient.PersistentVolumes.Create() + // calls + {"create", "persistentvolumes", errors.New("Mock creation error1")}, + {"create", "persistentvolumes", errors.New("Mock creation error2")}, + {"create", "persistentvolumes", errors.New("Mock creation error3")}, + {"create", "persistentvolumes", errors.New("Mock creation error4")}, + {"create", "persistentvolumes", errors.New("Mock creation error5")}, + }, + wrapTestWithControllerConfig(operationDelete, []error{nil}, + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)), + }, + { + // Provision failure - cannot save provisioned PV five times, + // volume delete failed - no plugin found + "11-10 - cannot save provisioned volume, no delete plugin found", + novolumes, + novolumes, + newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"}, + []reactorError{ + // Inject error to five kubeclient.PersistentVolumes.Create() + // calls + {"create", "persistentvolumes", errors.New("Mock creation error1")}, + {"create", "persistentvolumes", errors.New("Mock creation error2")}, + {"create", "persistentvolumes", errors.New("Mock creation error3")}, + {"create", "persistentvolumes", errors.New("Mock creation error4")}, + {"create", "persistentvolumes", errors.New("Mock creation error5")}, + }, + // No deleteCalls are configured, which results into no deleter plugin available for the volume + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + { + // Provision failure - cannot save provisioned PV five times, + // volume delete failed - deleter returns error five times + "11-11 - cannot save provisioned volume, deleter fails", + novolumes, + novolumes, + newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-11", "uid11-11", 
"1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"}, + []reactorError{ + // Inject error to five kubeclient.PersistentVolumes.Create() + // calls + {"create", "persistentvolumes", errors.New("Mock creation error1")}, + {"create", "persistentvolumes", errors.New("Mock creation error2")}, + {"create", "persistentvolumes", errors.New("Mock creation error3")}, + {"create", "persistentvolumes", errors.New("Mock creation error4")}, + {"create", "persistentvolumes", errors.New("Mock creation error5")}, + }, + wrapTestWithControllerConfig( + operationDelete, []error{ + errors.New("Mock deletion error1"), + errors.New("Mock deletion error2"), + errors.New("Mock deletion error3"), + errors.New("Mock deletion error4"), + errors.New("Mock deletion error5"), }, - []*api.PersistentVolumeClaim{ - newClaim("claim11-7", "uid11-7", "1Gi", "volume11-7", api.ClaimBound, annClass, annBindCompleted), + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + ), + }, + { + // Provision failure - cannot save provisioned PV five times, + // volume delete succeeds 2nd time + "11-12 - cannot save provisioned volume, delete succeeds 2nd time", + novolumes, + novolumes, + newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + []reactorError{ + // Inject error to five kubeclient.PersistentVolumes.Create() + // calls + {"create", "persistentvolumes", errors.New("Mock creation error1")}, + {"create", "persistentvolumes", errors.New("Mock creation error2")}, + {"create", "persistentvolumes", errors.New("Mock creation error3")}, + {"create", "persistentvolumes", errors.New("Mock creation error4")}, + {"create", "persistentvolumes", errors.New("Mock creation error5")}, + }, + wrapTestWithControllerConfig( + operationDelete, []error{ + errors.New("Mock deletion error1"), + nil, }, - []string{"Warning ProvisioningFailed"}, - getSyncClaimWithOperation(operationProvision, []error{errors.New("Moc provisioner error")}), - }, */ + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + ), + }, } runSyncTests(t, tests) } From 41adcc5496f86b5ad30dd7aa628c1602fb0df728 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:30 +0200 Subject: [PATCH 29/34] Speed up binding of provisioned volumes This fixes e2e test for provisioning - it expects that provisioned volumes are bound quickly. Majority of this patch is update of test framework needs to initialize the controller appropriately. --- .../persistentvolume_controller.go | 17 +++++++++++++++-- .../persistentvolume_controller_test.go | 3 +-- .../persistentvolume_framework_test.go | 15 ++++++++++++--- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 956feced3c0..87c3018aa0f 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -651,8 +651,6 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) } return nil } else if claim.Spec.VolumeName == "" { - // This block collapses into a NOP; we're leaving this here for - // completeness. 
if hasAnnotation(volume.ObjectMeta, annBoundByController) { // The binding is not completed; let PVC sync handle it glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) @@ -660,6 +658,21 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) // Dangling PV; try to re-establish the link in the PVC sync glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) } + // In both cases, the volume is Bound and the claim is Pending. + // Next syncClaim will fix it. To speed it up, we enqueue the claim + // into the controller, which results in syncClaim to be called + // shortly (and in the right goroutine). + // This speeds up binding of provisioned volumes - provisioner saves + // only the new PV and it expects that next syncClaim will bind the + // claim to it. + clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return fmt.Errorf("error cloning claim %q: %v", claimToClaimKey(claim), err) + } + err = ctrl.claimController.Requeue(clone) + if err != nil { + return fmt.Errorf("error enqueing claim %q for faster sync: %v", claimToClaimKey(claim), err) + } return nil } else if claim.Spec.VolumeName == volume.Name { // Volume is bound to a claim properly, update status if necessary diff --git a/pkg/controller/persistentvolume/persistentvolume_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_controller_test.go index 552874f331f..9ae9010340f 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller_test.go @@ -128,8 +128,7 @@ func TestControllerSync(t *testing.T) { client := &fake.Clientset{} volumeSource := framework.NewFakeControllerSource() claimSource := framework.NewFakeControllerSource() - ctrl := newPersistentVolumeController(client) - ctrl.initializeController(time.Minute, volumeSource, claimSource) + ctrl := newPersistentVolumeController(client, volumeSource, claimSource) reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/persistentvolume_framework_test.go index b1f798d0f09..f18578c5f3e 100644 --- a/pkg/controller/persistentvolume/persistentvolume_framework_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_framework_test.go @@ -484,7 +484,7 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, return reactor } -func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVolumeController { +func newPersistentVolumeController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher) *PersistentVolumeController { ctrl := &PersistentVolumeController{ volumes: newPersistentVolumeOrderedIndex(), claims: cache.NewStore(cache.MetaNamespaceKeyFunc), @@ -496,6 +496,15 @@ func newPersistentVolumeController(kubeClient clientset.Interface) *PersistentVo createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: 5 * time.Millisecond, } + + // Create dummy volume/claim sources for controller watchers when needed + if volumeSource == nil { + volumeSource = framework.NewFakeControllerSource() + } + if claimSource == nil { + claimSource = framework.NewFakeControllerSource() + } + 
ctrl.initializeController(5*time.Second, volumeSource, claimSource) return ctrl } @@ -723,7 +732,7 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newPersistentVolumeController(client) + ctrl := newPersistentVolumeController(client, nil, nil) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -767,7 +776,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newPersistentVolumeController(client) + ctrl := newPersistentVolumeController(client, nil, nil) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) From 440b4bc6ba01ebfa65be7865bb33e252182b451a Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:31 +0200 Subject: [PATCH 30/34] Fix integration tests. --- test/integration/persistent_volumes_test.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/test/integration/persistent_volumes_test.go b/test/integration/persistent_volumes_test.go index 82206a42598..b0dbc0c4bca 100644 --- a/test/integration/persistent_volumes_test.go +++ b/test/integration/persistent_volumes_test.go @@ -49,21 +49,15 @@ func TestPersistentVolumeRecycler(t *testing.T) { deleteAllEtcdKeys() // Use higher QPS and Burst, there is a test for race condition below, which // creates many claims and default values were too low. - binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000}) - recyclerClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000}) testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000}) host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil) plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}, 0, 0, nil, nil, nil, nil}} cloud := &fake_cloud.FakeCloud{} - binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 10*time.Second) - binder.Run() - defer binder.Stop() - - recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 30*time.Second, 3, plugins, cloud) - recycler.Run() - defer recycler.Stop() + ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "") + ctrl.Run() + defer ctrl.Stop() // This PV will be claimed, released, and recycled. 
pv := &api.PersistentVolume{ From c5fe1f943c9865d95d942914c130e4181f14945d Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:32 +0200 Subject: [PATCH 31/34] Fixed binder logging - we need the original volume/claim in error paths - don't report version conflicts as errors (they happen pretty often and we recover from them) --- .../persistentvolume_controller.go | 66 ++++++++++++++++--- 1 file changed, 56 insertions(+), 10 deletions(-) diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/persistentvolume_controller.go index 87c3018aa0f..7c1e59d618f 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/persistentvolume_controller.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" @@ -238,7 +239,13 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { return } if err := ctrl.syncVolume(pv); err != nil { - glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } } } @@ -255,7 +262,13 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) return } if err := ctrl.syncVolume(newVolume); err != nil { - glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } } } @@ -293,7 +306,13 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { // waiting until the next sync period for its Lost status. err := ctrl.syncClaim(claim) if err != nil { - glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + } } } else { glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) @@ -314,7 +333,13 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { return } if err := ctrl.syncClaim(claim); err != nil { - glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. 
+ glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } } } @@ -331,7 +356,13 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) return } if err := ctrl.syncClaim(newClaim); err != nil { - glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } else { + glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } } } @@ -372,7 +403,13 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { if volume != nil { err := ctrl.syncVolume(volume) if err != nil { - glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } } } } else { @@ -669,6 +706,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) if err != nil { return fmt.Errorf("error cloning claim %q: %v", claimToClaimKey(claim), err) } + glog.V(5).Infof("requeueing claim %q for faster syncClaim", claimToClaimKey(claim)) err = ctrl.claimController.Requeue(clone) if err != nil { return fmt.Errorf("error enqueing claim %q for faster sync: %v", claimToClaimKey(claim), err) @@ -980,28 +1018,36 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *api.PersistentV // mechanism. func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) error { var err error + // use updateClaim/updatedVolume to keep the original claim/volume for + // logging in error cases. 
+ var updatedClaim *api.PersistentVolumeClaim + var updatedVolume *api.PersistentVolume glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) - if volume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { + if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) return err } + volume = updatedVolume - if volume, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil { + if updatedVolume, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil { glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) return err } + volume = updatedVolume - if claim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { + if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) return err } + claim = updatedClaim - if _, err = ctrl.updateClaimPhase(claim, api.ClaimBound); err != nil { + if updatedClaim, err = ctrl.updateClaimPhase(claim, api.ClaimBound); err != nil { glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err) return err } + claim = updatedClaim glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) From 7f549511e21334a4a545fb20a03959a3f2ffde5b Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:33 +0200 Subject: [PATCH 32/34] Big move and rename - remove persistentvolume_ prefix from all files - split controller.go into controller.go and controller_base.go (to have them under 1500 lines for github) --- ...tentvolume_sync_test.go => binder_test.go} | 0 ...tentvolume_controller.go => controller.go} | 354 ---------------- .../persistentvolume/controller_base.go | 390 ++++++++++++++++++ ..._controller_test.go => controller_test.go} | 0 ...ntvolume_delete_test.go => delete_test.go} | 0 ...me_framework_test.go => framework_test.go} | 0 .../{persistentvolume_index.go => index.go} | 0 ...tentvolume_index_test.go => index_test.go} | 0 ...me_provision_test.go => provision_test.go} | 0 ...volume_recycle_test.go => recycle_test.go} | 0 ...ersistentvolume_host.go => volume_host.go} | 0 11 files changed, 390 insertions(+), 354 deletions(-) rename pkg/controller/persistentvolume/{persistentvolume_sync_test.go => binder_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_controller.go => controller.go} (80%) create mode 100644 pkg/controller/persistentvolume/controller_base.go rename pkg/controller/persistentvolume/{persistentvolume_controller_test.go => controller_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_delete_test.go => delete_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_framework_test.go => framework_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_index.go => index.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_index_test.go => index_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_provision_test.go => provision_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_recycle_test.go => 
recycle_test.go} (100%) rename pkg/controller/persistentvolume/{persistentvolume_host.go => volume_host.go} (100%) diff --git a/pkg/controller/persistentvolume/persistentvolume_sync_test.go b/pkg/controller/persistentvolume/binder_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_sync_test.go rename to pkg/controller/persistentvolume/binder_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_controller.go b/pkg/controller/persistentvolume/controller.go similarity index 80% rename from pkg/controller/persistentvolume/persistentvolume_controller.go rename to pkg/controller/persistentvolume/controller.go index 7c1e59d618f..21d643ccf6c 100644 --- a/pkg/controller/persistentvolume/persistentvolume_controller.go +++ b/pkg/controller/persistentvolume/controller.go @@ -22,17 +22,13 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" vol "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/watch" "github.com/golang/glog" ) @@ -144,280 +140,6 @@ type PersistentVolumeController struct { createProvisionedPVInterval time.Duration } -// NewPersistentVolumeController creates a new PersistentVolumeController -func NewPersistentVolumeController( - kubeClient clientset.Interface, - syncPeriod time.Duration, - provisioner vol.ProvisionableVolumePlugin, - recyclers []vol.VolumePlugin, - cloud cloudprovider.Interface, - clusterName string) *PersistentVolumeController { - - broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) - recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) - - controller := &PersistentVolumeController{ - kubeClient: kubeClient, - eventRecorder: recorder, - runningOperations: make(map[string]bool), - cloud: cloud, - provisioner: provisioner, - clusterName: clusterName, - createProvisionedPVRetryCount: createProvisionedPVRetryCount, - createProvisionedPVInterval: createProvisionedPVInterval, - } - controller.recyclePluginMgr.InitPlugins(recyclers, controller) - if controller.provisioner != nil { - if err := controller.provisioner.Init(controller); err != nil { - glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) - } - } - - volumeSource := &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumes().Watch(options) - }, - } - - claimSource := &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) - }, - } - - controller.initializeController(syncPeriod, volumeSource, claimSource) - - return controller -} - -// initializeController prepares watching 
for PersistentVolume and -// PersistentVolumeClaim events from given sources. This should be used to -// initialize the controller for real operation (with real event sources) and -// also during testing (with fake ones). -func (ctrl *PersistentVolumeController) initializeController(syncPeriod time.Duration, volumeSource, claimSource cache.ListerWatcher) { - glog.V(4).Infof("initializing PersistentVolumeController, sync every %s", syncPeriod.String()) - ctrl.volumes.store, ctrl.volumeController = framework.NewIndexerInformer( - volumeSource, - &api.PersistentVolume{}, - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addVolume, - UpdateFunc: ctrl.updateVolume, - DeleteFunc: ctrl.deleteVolume, - }, - cache.Indexers{"accessmodes": accessModesIndexFunc}, - ) - ctrl.claims, ctrl.claimController = framework.NewInformer( - claimSource, - &api.PersistentVolumeClaim{}, - syncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addClaim, - UpdateFunc: ctrl.updateClaim, - DeleteFunc: ctrl.deleteClaim, - }, - ) -} - -// addVolume is callback from framework.Controller watching PersistentVolume -// events. -func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { - if !ctrl.isFullySynced() { - return - } - - pv, ok := obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("expected PersistentVolume but handler received %+v", obj) - return - } - if err := ctrl.syncVolume(pv); err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the controller - // recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) - } else { - glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) - } - } -} - -// updateVolume is callback from framework.Controller watching PersistentVolume -// events. -func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { - if !ctrl.isFullySynced() { - return - } - - newVolume, ok := newObj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) - return - } - if err := ctrl.syncVolume(newVolume); err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the controller - // recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) - } else { - glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) - } - } -} - -// deleteVolume is callback from framework.Controller watching PersistentVolume -// events. -func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { - if !ctrl.isFullySynced() { - return - } - - var volume *api.PersistentVolume - var ok bool - volume, ok = obj.(*api.PersistentVolume) - if !ok { - if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { - volume, ok = unknown.Obj.(*api.PersistentVolume) - if !ok { - glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) - return - } - } else { - glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) - return - } - } - - if !ok || volume == nil || volume.Spec.ClaimRef == nil { - return - } - - if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { - if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { - // sync the claim when its volume is deleted. 
Explicitly syncing the - // claim here in response to volume deletion prevents the claim from - // waiting until the next sync period for its Lost status. - err := ctrl.syncClaim(claim) - if err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the - // controller recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) - } else { - glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) - } - } - } else { - glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) - } - } -} - -// addClaim is callback from framework.Controller watching PersistentVolumeClaim -// events. -func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { - if !ctrl.isFullySynced() { - return - } - - claim, ok := obj.(*api.PersistentVolumeClaim) - if !ok { - glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) - return - } - if err := ctrl.syncClaim(claim); err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the controller - // recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) - } else { - glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) - } - } -} - -// updateClaim is callback from framework.Controller watching PersistentVolumeClaim -// events. -func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { - if !ctrl.isFullySynced() { - return - } - - newClaim, ok := newObj.(*api.PersistentVolumeClaim) - if !ok { - glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) - return - } - if err := ctrl.syncClaim(newClaim); err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the controller - // recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) - } else { - glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) - } - } -} - -// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim -// events. -func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { - if !ctrl.isFullySynced() { - return - } - - var volume *api.PersistentVolume - var claim *api.PersistentVolumeClaim - var ok bool - - claim, ok = obj.(*api.PersistentVolumeClaim) - if !ok { - if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { - claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) - if !ok { - glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) - return - } - } else { - glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) - return - } - } - - if !ok || claim == nil { - return - } - - if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { - if volume, ok = pvObj.(*api.PersistentVolume); ok { - // sync the volume when its claim is deleted. Explicitly sync'ing the - // volume here in response to claim deletion prevents the volume from - // waiting until the next sync period for its Release. 
- if volume != nil { - err := ctrl.syncVolume(volume) - if err != nil { - if errors.IsConflict(err) { - // Version conflict error happens quite often and the - // controller recovers from it easily. - glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) - } else { - glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) - } - } - } - } else { - glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) - } - } -} - // syncClaim is the main controller method to decide what to do with a claim. // It's invoked by appropriate framework.Controller callbacks when a claim is // created, updated or periodically synced. We do not differentiate between @@ -761,36 +483,6 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) } } -// Run starts all of this controller's control loops -func (ctrl *PersistentVolumeController) Run() { - glog.V(4).Infof("starting PersistentVolumeController") - - if ctrl.volumeControllerStopCh == nil { - ctrl.volumeControllerStopCh = make(chan struct{}) - go ctrl.volumeController.Run(ctrl.volumeControllerStopCh) - } - - if ctrl.claimControllerStopCh == nil { - ctrl.claimControllerStopCh = make(chan struct{}) - go ctrl.claimController.Run(ctrl.claimControllerStopCh) - } -} - -// Stop gracefully shuts down this controller -func (ctrl *PersistentVolumeController) Stop() { - glog.V(4).Infof("stopping PersistentVolumeController") - close(ctrl.volumeControllerStopCh) - close(ctrl.claimControllerStopCh) -} - -// isFullySynced returns true, if both volume and claim caches are fully loaded -// after startup. -// We do not want to process events with not fully loaded caches - e.g. we might -// recycle/delete PVs that don't have corresponding claim in the cache yet. -func (ctrl *PersistentVolumeController) isFullySynced() bool { - return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() -} - // updateClaimPhase saves new claim phase to API server. 
func (ctrl *PersistentVolumeController) updateClaimPhase(claim *api.PersistentVolumeClaim, phase api.PersistentVolumeClaimPhase) (*api.PersistentVolumeClaim, error) { glog.V(4).Infof("updating PersistentVolumeClaim[%s]: set phase %s", claimToClaimKey(claim), phase) @@ -1530,49 +1222,3 @@ func (ctrl *PersistentVolumeController) finishRunningOperation(operationName str func (ctrl *PersistentVolumeController) startRunningOperation(operationName string) { ctrl.runningOperations[operationName] = true } - -// Stateless functions - -func hasAnnotation(obj api.ObjectMeta, ann string) bool { - _, found := obj.Annotations[ann] - return found -} - -func setAnnotation(obj *api.ObjectMeta, ann string, value string) { - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[ann] = value -} - -func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string { - everBound := hasAnnotation(claim.ObjectMeta, annBindCompleted) - boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController) - - return fmt.Sprintf("phase: %s, bound to: %q, wasEverBound: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, everBound, boundByController) -} - -func getVolumeStatusForLogging(volume *api.PersistentVolume) string { - boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController) - claimName := "" - if volume.Spec.ClaimRef != nil { - claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID) - } - return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController) -} - -// isVolumeBoundToClaim returns true, if given volume is pre-bound or bound -// to specific claim. Both claim.Name and claim.Namespace must be equal. -// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. -func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool { - if volume.Spec.ClaimRef == nil { - return false - } - if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { - return false - } - if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { - return false - } - return true -} diff --git a/pkg/controller/persistentvolume/controller_base.go b/pkg/controller/persistentvolume/controller_base.go new file mode 100644 index 00000000000..de5ff8a5f0b --- /dev/null +++ b/pkg/controller/persistentvolume/controller_base.go @@ -0,0 +1,390 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package persistentvolume + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + vol "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" +) + +// This file contains the controller base functionality, i.e. framework to +// process PV/PVC added/updated/deleted events. The real binding, provisioning, +// recycling and deleting is done in controller.go + +// NewPersistentVolumeController creates a new PersistentVolumeController +func NewPersistentVolumeController( + kubeClient clientset.Interface, + syncPeriod time.Duration, + provisioner vol.ProvisionableVolumePlugin, + recyclers []vol.VolumePlugin, + cloud cloudprovider.Interface, + clusterName string) *PersistentVolumeController { + + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) + + controller := &PersistentVolumeController{ + kubeClient: kubeClient, + eventRecorder: recorder, + runningOperations: make(map[string]bool), + cloud: cloud, + provisioner: provisioner, + clusterName: clusterName, + createProvisionedPVRetryCount: createProvisionedPVRetryCount, + createProvisionedPVInterval: createProvisionedPVInterval, + } + controller.recyclePluginMgr.InitPlugins(recyclers, controller) + if controller.provisioner != nil { + if err := controller.provisioner.Init(controller); err != nil { + glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) + } + } + + volumeSource := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumes().Watch(options) + }, + } + + claimSource := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) + }, + } + + controller.initializeController(syncPeriod, volumeSource, claimSource) + + return controller +} + +// initializeController prepares watching for PersistentVolume and +// PersistentVolumeClaim events from given sources. This should be used to +// initialize the controller for real operation (with real event sources) and +// also during testing (with fake ones). 
+func (ctrl *PersistentVolumeController) initializeController(syncPeriod time.Duration, volumeSource, claimSource cache.ListerWatcher) { + glog.V(4).Infof("initializing PersistentVolumeController, sync every %s", syncPeriod.String()) + ctrl.volumes.store, ctrl.volumeController = framework.NewIndexerInformer( + volumeSource, + &api.PersistentVolume{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addVolume, + UpdateFunc: ctrl.updateVolume, + DeleteFunc: ctrl.deleteVolume, + }, + cache.Indexers{"accessmodes": accessModesIndexFunc}, + ) + ctrl.claims, ctrl.claimController = framework.NewInformer( + claimSource, + &api.PersistentVolumeClaim{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addClaim, + UpdateFunc: ctrl.updateClaim, + DeleteFunc: ctrl.deleteClaim, + }, + ) +} + +// addVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + pv, ok := obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("expected PersistentVolume but handler received %+v", obj) + return + } + if err := ctrl.syncVolume(pv); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } + } +} + +// updateVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + + newVolume, ok := newObj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) + return + } + if err := ctrl.syncVolume(newVolume); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } + } +} + +// deleteVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + var volume *api.PersistentVolume + var ok bool + volume, ok = obj.(*api.PersistentVolume) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + volume, ok = unknown.Obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) + return + } + } + + if !ok || volume == nil || volume.Spec.ClaimRef == nil { + return + } + + if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { + if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { + // sync the claim when its volume is deleted. Explicitly syncing the + // claim here in response to volume deletion prevents the claim from + // waiting until the next sync period for its Lost status. 
+ err := ctrl.syncClaim(claim) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err) + } + } + } else { + glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) + } + } +} + +// addClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + claim, ok := obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) + return + } + if err := ctrl.syncClaim(claim); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } + } +} + +// updateClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + + newClaim, ok := newObj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) + return + } + if err := ctrl.syncClaim(newClaim); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } else { + glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } + } +} + +// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + var volume *api.PersistentVolume + var claim *api.PersistentVolumeClaim + var ok bool + + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) + return + } + } + + if !ok || claim == nil { + return + } + + if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { + if volume, ok = pvObj.(*api.PersistentVolume); ok { + // sync the volume when its claim is deleted. Explicitly sync'ing the + // volume here in response to claim deletion prevents the volume from + // waiting until the next sync period for its Release. + if volume != nil { + err := ctrl.syncVolume(volume) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. 
+ glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } + } + } + } else { + glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) + } + } +} + +// Run starts all of this controller's control loops +func (ctrl *PersistentVolumeController) Run() { + glog.V(4).Infof("starting PersistentVolumeController") + + if ctrl.volumeControllerStopCh == nil { + ctrl.volumeControllerStopCh = make(chan struct{}) + go ctrl.volumeController.Run(ctrl.volumeControllerStopCh) + } + + if ctrl.claimControllerStopCh == nil { + ctrl.claimControllerStopCh = make(chan struct{}) + go ctrl.claimController.Run(ctrl.claimControllerStopCh) + } +} + +// Stop gracefully shuts down this controller +func (ctrl *PersistentVolumeController) Stop() { + glog.V(4).Infof("stopping PersistentVolumeController") + close(ctrl.volumeControllerStopCh) + close(ctrl.claimControllerStopCh) +} + +// isFullySynced returns true, if both volume and claim caches are fully loaded +// after startup. +// We do not want to process events with not fully loaded caches - e.g. we might +// recycle/delete PVs that don't have corresponding claim in the cache yet. +func (ctrl *PersistentVolumeController) isFullySynced() bool { + return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() +} + +// Stateless functions + +func hasAnnotation(obj api.ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +func setAnnotation(obj *api.ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string { + bound := hasAnnotation(claim.ObjectMeta, annBindCompleted) + boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController) + + return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController) +} + +func getVolumeStatusForLogging(volume *api.PersistentVolume) string { + boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController) + claimName := "" + if volume.Spec.ClaimRef != nil { + claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID) + } + return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController) +} + +// isVolumeBoundToClaim returns true, if given volume is pre-bound or bound +// to specific claim. Both claim.Name and claim.Namespace must be equal. +// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. 
+func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool { + if volume.Spec.ClaimRef == nil { + return false + } + if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { + return false + } + if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { + return false + } + return true +} diff --git a/pkg/controller/persistentvolume/persistentvolume_controller_test.go b/pkg/controller/persistentvolume/controller_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_controller_test.go rename to pkg/controller/persistentvolume/controller_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_delete_test.go b/pkg/controller/persistentvolume/delete_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_delete_test.go rename to pkg/controller/persistentvolume/delete_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_framework_test.go b/pkg/controller/persistentvolume/framework_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_framework_test.go rename to pkg/controller/persistentvolume/framework_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_index.go b/pkg/controller/persistentvolume/index.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_index.go rename to pkg/controller/persistentvolume/index.go diff --git a/pkg/controller/persistentvolume/persistentvolume_index_test.go b/pkg/controller/persistentvolume/index_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_index_test.go rename to pkg/controller/persistentvolume/index_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_provision_test.go b/pkg/controller/persistentvolume/provision_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_provision_test.go rename to pkg/controller/persistentvolume/provision_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_recycle_test.go b/pkg/controller/persistentvolume/recycle_test.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_recycle_test.go rename to pkg/controller/persistentvolume/recycle_test.go diff --git a/pkg/controller/persistentvolume/persistentvolume_host.go b/pkg/controller/persistentvolume/volume_host.go similarity index 100% rename from pkg/controller/persistentvolume/persistentvolume_host.go rename to pkg/controller/persistentvolume/volume_host.go From 79b91b9ee04e0e4e558ffb860602e86eb4d6053f Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:34 +0200 Subject: [PATCH 33/34] Refactor persistent volume initialization There should be only one initialization function, shared by the real controller and unit tests. 
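The refactored constructor takes the watch sources and the event recorder as parameters, with nil meaning "build the real dependency for me", so production code and unit tests share one initialization path. As a rough stand-alone sketch of that injection pattern (the type and function names below are illustrative, not the controller's real ones):

    package main

    import "fmt"

    // Illustrative stand-ins for cache.ListerWatcher and record.EventRecorder;
    // none of the names below are the real Kubernetes types.
    type Source interface{ List() []string }

    type Recorder interface{ Event(msg string) }

    type apiSource struct{} // "production" default, like a ListWatch on the API server

    func (apiSource) List() []string { return []string{"pv-from-apiserver"} }

    type fakeSource struct{} // what a unit test would inject

    func (fakeSource) List() []string { return []string{"pv-fixture"} }

    type noopRecorder struct{}

    func (noopRecorder) Event(string) {}

    type controller struct {
    	source   Source
    	recorder Recorder
    }

    // newController mirrors the shape of the refactored NewPersistentVolumeController:
    // one constructor serves both production and tests, and nil arguments mean
    // "build the real dependency for me".
    func newController(source Source, recorder Recorder) *controller {
    	if source == nil {
    		source = apiSource{}
    	}
    	if recorder == nil {
    		recorder = noopRecorder{}
    	}
    	return &controller{source: source, recorder: recorder}
    }

    func main() {
    	fmt.Println(newController(nil, nil).source.List())                     // production path
    	fmt.Println(newController(fakeSource{}, noopRecorder{}).source.List()) // test path
    }

In the real constructor the defaults are the API-server ListWatch sources and the broadcaster-backed event recorder, as the hunks below show; the controller managers simply pass nil for all three new arguments.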
--- .../app/controllermanager.go | 1 + .../controllermanager/controllermanager.go | 3 + .../persistentvolume/controller_base.go | 76 +++++++++---------- .../persistentvolume/controller_test.go | 2 +- .../persistentvolume/framework_test.go | 34 ++++----- test/integration/persistent_volumes_test.go | 2 +- 6 files changed, 61 insertions(+), 57 deletions(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 1a2bf5ca1c7..f55e4fc0ae9 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -385,6 +385,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, s.ClusterName, + nil, nil, nil, ) volumeController.Run() time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index 36ee2e6546d..3374dc4ab5f 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -283,6 +283,9 @@ func (s *CMServer) Run(_ []string) error { kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, s.ClusterName, + nil, + nil, + nil, ) volumeController.Run() diff --git a/pkg/controller/persistentvolume/controller_base.go b/pkg/controller/persistentvolume/controller_base.go index de5ff8a5f0b..6a217a590c5 100644 --- a/pkg/controller/persistentvolume/controller_base.go +++ b/pkg/controller/persistentvolume/controller_base.go @@ -46,15 +46,20 @@ func NewPersistentVolumeController( provisioner vol.ProvisionableVolumePlugin, recyclers []vol.VolumePlugin, cloud cloudprovider.Interface, - clusterName string) *PersistentVolumeController { + clusterName string, + volumeSource, claimSource cache.ListerWatcher, + eventRecorder record.EventRecorder, +) *PersistentVolumeController { - broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) - recorder := broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) + if eventRecorder == nil { + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) + } controller := &PersistentVolumeController{ kubeClient: kubeClient, - eventRecorder: recorder, + eventRecorder: eventRecorder, runningOperations: make(map[string]bool), cloud: cloud, provisioner: provisioner, @@ -62,6 +67,7 @@ func NewPersistentVolumeController( createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: createProvisionedPVInterval, } + controller.recyclePluginMgr.InitPlugins(recyclers, controller) if controller.provisioner != nil { if err := controller.provisioner.Init(controller); err != nil { @@ -69,56 +75,50 @@ func NewPersistentVolumeController( } } - volumeSource := &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumes().Watch(options) - }, + if volumeSource == nil { + volumeSource = 
&cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumes().Watch(options) + }, + } } - claimSource := &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) - }, + if claimSource == nil { + claimSource = &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) + }, + } } - controller.initializeController(syncPeriod, volumeSource, claimSource) - - return controller -} - -// initializeController prepares watching for PersistentVolume and -// PersistentVolumeClaim events from given sources. This should be used to -// initialize the controller for real operation (with real event sources) and -// also during testing (with fake ones). -func (ctrl *PersistentVolumeController) initializeController(syncPeriod time.Duration, volumeSource, claimSource cache.ListerWatcher) { - glog.V(4).Infof("initializing PersistentVolumeController, sync every %s", syncPeriod.String()) - ctrl.volumes.store, ctrl.volumeController = framework.NewIndexerInformer( + controller.volumes.store, controller.volumeController = framework.NewIndexerInformer( volumeSource, &api.PersistentVolume{}, syncPeriod, framework.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addVolume, - UpdateFunc: ctrl.updateVolume, - DeleteFunc: ctrl.deleteVolume, + AddFunc: controller.addVolume, + UpdateFunc: controller.updateVolume, + DeleteFunc: controller.deleteVolume, }, cache.Indexers{"accessmodes": accessModesIndexFunc}, ) - ctrl.claims, ctrl.claimController = framework.NewInformer( + controller.claims, controller.claimController = framework.NewInformer( claimSource, &api.PersistentVolumeClaim{}, syncPeriod, framework.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addClaim, - UpdateFunc: ctrl.updateClaim, - DeleteFunc: ctrl.deleteClaim, + AddFunc: controller.addClaim, + UpdateFunc: controller.updateClaim, + DeleteFunc: controller.deleteClaim, }, ) + return controller } // addVolume is callback from framework.Controller watching PersistentVolume diff --git a/pkg/controller/persistentvolume/controller_test.go b/pkg/controller/persistentvolume/controller_test.go index 9ae9010340f..d3f92e6b6f7 100644 --- a/pkg/controller/persistentvolume/controller_test.go +++ b/pkg/controller/persistentvolume/controller_test.go @@ -128,7 +128,7 @@ func TestControllerSync(t *testing.T) { client := &fake.Clientset{} volumeSource := framework.NewFakeControllerSource() claimSource := framework.NewFakeControllerSource() - ctrl := newPersistentVolumeController(client, volumeSource, claimSource) + ctrl := newTestController(client, volumeSource, claimSource) reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) for _, claim := range test.initialClaims { claimSource.Add(claim) diff --git a/pkg/controller/persistentvolume/framework_test.go b/pkg/controller/persistentvolume/framework_test.go index f18578c5f3e..aea5dc4e8bd 
100644 --- a/pkg/controller/persistentvolume/framework_test.go +++ b/pkg/controller/persistentvolume/framework_test.go @@ -484,27 +484,27 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, return reactor } -func newPersistentVolumeController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher) *PersistentVolumeController { - ctrl := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(cache.MetaNamespaceKeyFunc), - kubeClient: kubeClient, - eventRecorder: record.NewFakeRecorder(1000), - runningOperations: make(map[string]bool), - - // Speed up the testing - createProvisionedPVRetryCount: createProvisionedPVRetryCount, - createProvisionedPVInterval: 5 * time.Millisecond, - } - - // Create dummy volume/claim sources for controller watchers when needed +func newTestController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher) *PersistentVolumeController { if volumeSource == nil { volumeSource = framework.NewFakeControllerSource() } if claimSource == nil { claimSource = framework.NewFakeControllerSource() } - ctrl.initializeController(5*time.Second, volumeSource, claimSource) + ctrl := NewPersistentVolumeController( + kubeClient, + 5*time.Second, // sync period + nil, // provisioner + []vol.VolumePlugin{}, // recyclers + nil, // cloud + "", + volumeSource, + claimSource, + record.NewFakeRecorder(1000), // event recorder + ) + + // Speed up the test + ctrl.createProvisionedPVInterval = 5 * time.Millisecond return ctrl } @@ -732,7 +732,7 @@ func runSyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newPersistentVolumeController(client, nil, nil) + ctrl := newTestController(client, nil, nil) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) @@ -776,7 +776,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl := newPersistentVolumeController(client, nil, nil) + ctrl := newTestController(client, nil, nil) reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) diff --git a/test/integration/persistent_volumes_test.go b/test/integration/persistent_volumes_test.go index b0dbc0c4bca..a5fa8a9f946 100644 --- a/test/integration/persistent_volumes_test.go +++ b/test/integration/persistent_volumes_test.go @@ -55,7 +55,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}, 0, 0, nil, nil, nil, nil}} cloud := &fake_cloud.FakeCloud{} - ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "") + ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "", nil, nil, nil) ctrl.Run() defer ctrl.Stop() From 01b20d8e77776dda3b7ec3389bd200ed80716d46 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 May 2016 14:55:50 +0200 Subject: [PATCH 34/34] Generate shorter provisioned PV names. GCE PD names are generated out of provisioned PV.Name, therefore it should be as short as possible and still unique. 
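With this change the provisioned PV name becomes "pvc-" plus the claim UID instead of "pv-provisioned-for-" plus the UID. The UID alone keeps the name unique; the shorter prefix just leaves more headroom under GCE's 63-character limit for disk names (the provisioner may add its own prefix on top). A small stand-alone illustration of the length difference (the UID below is made up):

    package main

    import "fmt"

    // Illustrative comparison of the old and new provisioned-PV naming schemes.
    // The UID below is made up; real claims carry an API-assigned UUID of the
    // same length.
    func main() {
    	uid := "8d3f0b4e-1c2a-11e6-8d22-42010af00002" // 36 characters

    	oldName := "pv-provisioned-for-" + uid
    	newName := "pvc-" + uid

    	fmt.Printf("%s  (%d chars)\n", oldName, len(oldName)) // 55 chars
    	fmt.Printf("%s  (%d chars)\n", newName, len(newName)) // 40 chars
    }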
--- pkg/controller/persistentvolume/controller.go | 2 +- pkg/controller/persistentvolume/provision_test.go | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/controller/persistentvolume/controller.go b/pkg/controller/persistentvolume/controller.go index 21d643ccf6c..e61a30baa65 100644 --- a/pkg/controller/persistentvolume/controller.go +++ b/pkg/controller/persistentvolume/controller.go @@ -1169,7 +1169,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume. // The name must be unique func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *api.PersistentVolumeClaim) string { - return "pv-provisioned-for-" + string(claim.UID) + return "pvc-" + string(claim.UID) } // scheduleOperation starts given asynchronous operation on given volume. It diff --git a/pkg/controller/persistentvolume/provision_test.go b/pkg/controller/persistentvolume/provision_test.go index 92eb222d680..44dad138831 100644 --- a/pkg/controller/persistentvolume/provision_test.go +++ b/pkg/controller/persistentvolume/provision_test.go @@ -33,7 +33,7 @@ func TestProvisionSync(t *testing.T) { // Provision a volume "11-1 - successful provision", novolumes, - newVolumeArray("pv-provisioned-for-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass), @@ -76,7 +76,7 @@ func TestProvisionSync(t *testing.T) { newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), []*api.PersistentVolume{ newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), - newVolume("pv-provisioned-for-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), }, newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim @@ -91,7 +91,7 @@ func TestProvisionSync(t *testing.T) { // a volume. "11-7 - claim is bound before provisioning", novolumes, - newVolumeArray("pv-provisioned-for-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), // The claim would be bound in next syncClaim newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), @@ -100,7 +100,7 @@ func TestProvisionSync(t *testing.T) { // Create a volume before provisionClaimOperation starts. // This similates a parallel controller provisioning the volume. 
reactor.lock.Lock() - volume := newVolume("pv-provisioned-for-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) + volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) reactor.volumes[volume.Name] = volume reactor.lock.Unlock() }), @@ -110,7 +110,7 @@ func TestProvisionSync(t *testing.T) { // second retry succeeds "11-8 - cannot save provisioned volume", novolumes, - newVolumeArray("pv-provisioned-for-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), @@ -244,10 +244,10 @@ func TestProvisionMultiSync(t *testing.T) { // Provision a volume with binding "12-1 - successful provision", novolumes, - newVolumeArray("pv-provisioned-for-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), // Binding will be completed in the next syncClaim - newClaimArray("claim12-1", "uid12-1", "1Gi", "pv-provisioned-for-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), + newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), }, }
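For context, each block changed above is one row of a table-driven test; the real row type lives in framework_test.go (not part of this diff) and carries full PV/PVC fixtures, expected objects, events and injected errors. A toy stand-alone version of the same layout, reduced to the "pvc-<uid>" naming behaviour these rows assert (all names below are illustrative):

    package main

    import "fmt"

    // A stand-alone sketch of the table-driven layout used by these provisioning
    // tests. Real rows carry full PV/PVC objects and run the controller's
    // syncClaim; this toy version only exercises the naming the fixtures assert.
    type testCase struct {
    	name            string
    	claimUID        string
    	initialVolumes  map[string]bool
    	expectedVolumes []string
    }

    // provision records a volume named "pvc-<uid>". Because the name is derived
    // from the claim UID, provisioning the same claim twice (or racing with a
    // parallel provisioner) maps onto the same entry instead of a duplicate.
    func provision(claimUID string, volumes map[string]bool) {
    	volumes["pvc-"+claimUID] = true
    }

    func main() {
    	tests := []testCase{
    		{"successful provision", "uid11-1", map[string]bool{}, []string{"pvc-uid11-1"}},
    		{"volume already provisioned in parallel", "uid11-7", map[string]bool{"pvc-uid11-7": true}, []string{"pvc-uid11-7"}},
    	}
    	for _, tc := range tests {
    		provision(tc.claimUID, tc.initialVolumes)
    		ok := len(tc.initialVolumes) == len(tc.expectedVolumes)
    		for _, want := range tc.expectedVolumes {
    			ok = ok && tc.initialVolumes[want]
    		}
    		fmt.Printf("%-42s ok=%v\n", tc.name, ok)
    	}
    }

Even in this reduced form the point of rows 11-1, 11-7 and 12-1 is visible: the provisioned volume's identity comes entirely from the claim UID, so changing the name prefix only required updating fixtures, not test logic.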