From beacd8722af7d1c7e6a72360fdbee00b75e03d4b Mon Sep 17 00:00:00 2001
From: markturansky
Date: Mon, 27 Apr 2015 14:57:07 -0400
Subject: [PATCH] addressed feedback. added opt-in cmd line flag

---
 cmd/kube-controller-manager/app/controllermanager.go    | 8 ++++++--
 pkg/api/testing/fuzzer.go                               | 6 ++++--
 pkg/volumeclaimbinder/persistent_volume_claim_binder.go | 8 +++++++-
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index 56f11a47b97..bb985ea14b2 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -60,6 +60,7 @@ type CMServer struct {
     ResourceQuotaSyncPeriod time.Duration
     NamespaceSyncPeriod     time.Duration
     PVClaimBinderSyncPeriod time.Duration
+    EnablePVCClaimBinder    bool
     RegisterRetryCount      int
     MachineList             util.StringList
     SyncNodeList            bool
@@ -117,6 +118,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
     fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource_quota_sync_period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
     fs.DurationVar(&s.NamespaceSyncPeriod, "namespace_sync_period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
     fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder_sync_period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
+    fs.BoolVar(&s.EnablePVCClaimBinder, "enable_alpha_pvclaimbinder", s.EnablePVCClaimBinder, "Optionally enable persistent volume claim binding. This feature is experimental and expected to change.")
     fs.DurationVar(&s.PodEvictionTimeout, "pod_eviction_timeout", s.PodEvictionTimeout, "The grace peroid for deleting pods on failed nodes.")
     fs.Float32Var(&s.DeletingPodsQps, "deleting_pods_qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
     fs.IntVar(&s.DeletingPodsBurst, "deleting_pods_burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
@@ -235,8 +237,10 @@ func (s *CMServer) Run(_ []string) error {
     namespaceManager := namespace.NewNamespaceManager(kubeClient, s.NamespaceSyncPeriod)
     namespaceManager.Run()
 
-    pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
-    pvclaimBinder.Run()
+    if s.EnablePVCClaimBinder {
+        pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
+        pvclaimBinder.Run()
+    }
 
     select {}
     return nil
diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go
index e62d790a2a6..340656b5348 100644
--- a/pkg/api/testing/fuzzer.go
+++ b/pkg/api/testing/fuzzer.go
@@ -203,11 +203,13 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
         },
         func(pv *api.PersistentVolume, c fuzz.Continue) {
             c.FuzzNoCustom(pv) // fuzz self without calling this function again
-            pv.Status.Phase = api.VolumePending
+            types := []api.PersistentVolumePhase{api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeAvailable}
+            pv.Status.Phase = types[c.Rand.Intn(len(types))]
         },
         func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
             c.FuzzNoCustom(pvc) // fuzz self without calling this function again
-            pvc.Status.Phase = api.ClaimPending
+            types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending}
+            pvc.Status.Phase = types[c.Rand.Intn(len(types))]
         },
         func(s *api.NamespaceSpec, c fuzz.Continue) {
             s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
diff --git a/pkg/volumeclaimbinder/persistent_volume_claim_binder.go b/pkg/volumeclaimbinder/persistent_volume_claim_binder.go
index bc097f260f5..8cef18fc45c 100644
--- a/pkg/volumeclaimbinder/persistent_volume_claim_binder.go
+++ b/pkg/volumeclaimbinder/persistent_volume_claim_binder.go
@@ -84,6 +84,8 @@ func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time
         framework.ResourceEventHandlerFuncs{
             AddFunc:    binder.addClaim,
             UpdateFunc: binder.updateClaim,
+            // No DeleteFunc needed: a claim requires no clean-up.
+            // The missing claim itself is the release of the resource.
         },
     )
 
@@ -177,6 +179,8 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
         if err == nil {
             // bound and active. Build claim status as needed.
             if claim.Status.VolumeRef == nil {
+                // syncClaimStatus sets VolumeRef, attempts to persist the
+                // claim status, and rolls back claim.Status as needed.
                 syncClaimStatus(binderClient, volume, claim)
             }
         } else {
@@ -198,6 +202,9 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 
     if currentPhase != nextPhase {
         volume.Status.Phase = nextPhase
+
+        // A change in state triggers another update through this controller.
+        // Each pass evaluates the current phase and decides whether to advance to the next one.
         volume, err := binderClient.UpdatePersistentVolumeStatus(volume)
         if err != nil {
             // Rollback to previous phase
@@ -220,7 +227,6 @@ func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCli
     nextPhase := currentPhase
 
     switch currentPhase {
-    // pending claims await a matching volume
     case api.ClaimPending:
         volume, err := volumeIndex.FindBestMatchForClaim(claim)
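
The controllermanager.go hunks carry the headline change: the claim binder now starts only when the operator passes the new flag. Below is a minimal, self-contained sketch of that gating pattern, using the stdlib flag package in place of spf13/pflag and a reduced stand-in for CMServer; only the flag name, default, and help text mirror the patch, the rest is illustrative scaffolding.

// Sketch only: stdlib flag and a trimmed struct stand in for
// spf13/pflag and the full CMServer from the patch.
package main

import (
    "flag"
    "fmt"
    "time"
)

type cmServer struct {
    PVClaimBinderSyncPeriod time.Duration
    EnablePVCClaimBinder    bool
}

func main() {
    s := cmServer{PVClaimBinderSyncPeriod: 10 * time.Second}
    flag.BoolVar(&s.EnablePVCClaimBinder, "enable_alpha_pvclaimbinder", s.EnablePVCClaimBinder,
        "Optionally enable persistent volume claim binding. This feature is experimental and expected to change.")
    flag.Parse()

    // The binder is only constructed and started when explicitly opted in,
    // so the experimental code path is inert at the default of false.
    if s.EnablePVCClaimBinder {
        fmt.Printf("starting claim binder, sync period %v\n", s.PVClaimBinderSyncPeriod)
        // pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
        // pvclaimBinder.Run()
    }
}

Run with -enable_alpha_pvclaimbinder to exercise the gated branch; left at the default, the alpha controller is never constructed.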
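The fuzzer.go hunks replace the hard-coded Pending phases with a uniform pick over the valid phases, so round-trip tests exercise every enum value rather than one. A sketch of the same pick-one-of-N pattern, with a hypothetical phase type standing in for the api package and math/rand in place of the *rand.Rand that gofuzz exposes as c.Rand:

// Sketch only: stand-in phase type and direct math/rand usage.
package main

import (
    "fmt"
    "math/rand"
)

type persistentVolumePhase string

const (
    volumePending   persistentVolumePhase = "Pending"
    volumeAvailable persistentVolumePhase = "Available"
    volumeBound     persistentVolumePhase = "Bound"
    volumeReleased  persistentVolumePhase = "Released"
)

func main() {
    r := rand.New(rand.NewSource(1))
    // Drawing uniformly from the full set of valid phases, instead of
    // always assigning Pending, lets the fuzzer cover every value.
    types := []persistentVolumePhase{volumePending, volumeBound, volumeReleased, volumeAvailable}
    fmt.Println(types[r.Intn(len(types))])
}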
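The comments added to syncVolume describe an optimistic update with rollback: set the next phase, try to persist it, and restore the previous phase if the write fails so a later sync pass can retry. A sketch of that pattern, with hypothetical stand-ins for api.PersistentVolume and binderClient.UpdatePersistentVolumeStatus:

// Sketch only: volume and persist are stand-ins for the patch's
// api.PersistentVolume and binderClient.UpdatePersistentVolumeStatus.
package main

import (
    "errors"
    "fmt"
)

type volume struct {
    phase string
}

// advancePhase applies nextPhase optimistically and rolls back on a failed
// persist. A successful write triggers another update through the
// controller, and each pass decides whether to advance again.
func advancePhase(v *volume, nextPhase string, persist func(*volume) error) error {
    currentPhase := v.phase
    if currentPhase == nextPhase {
        return nil
    }
    v.phase = nextPhase
    if err := persist(v); err != nil {
        // Rollback to the previous phase; the next sync retries the move.
        v.phase = currentPhase
        return err
    }
    return nil
}

func main() {
    v := &volume{phase: "Pending"}
    fail := func(*volume) error { return errors.New("update failed") }
    ok := func(*volume) error { return nil }

    fmt.Println(advancePhase(v, "Available", fail), v.phase) // update failed Pending
    fmt.Println(advancePhase(v, "Available", ok), v.phase)   // <nil> Available
}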