Merge pull request #24331 from jsafrane/devel/refactor-binder

Automatic merge from submit-queue

Refactor persistent volume controller

Here is the complete persistent volume controller as designed in https://github.com/pmorie/pv-haxxz/blob/master/controller.go

It's feature-complete and compatible with the current binder/recycler/provisioner. There are no new features, but it *should* be much more stable and predictable.

Testing
--
The unit test framework is quite complicated, but it was necessary to reach reasonable coverage (78% in `persistentvolume_controller.go`). The untested parts are the error cases, which are hard to test in a reasonable way: sure, I can inject a VersionConflictError on any object update and check that the error bubbles up to the appropriate places, but the real test would be to run `syncClaim`/`syncVolume` again and check that the controller recovers from the error in the next periodic sync. That's the hard part.
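One such recovery path is already exercised by the multi-sync framework. For example, this case from `TestDeleteMultiSync` (later in this commit) injects a delete error on the first call and verifies that the next sync retries and succeeds:

```go
{
	// delete failure - delete returns error. The controller should
	// try again.
	"9-1 - delete returns error",
	newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete),
	novolumes,
	noclaims,
	noclaims,
	[]string{"Warning VolumeFailedDelete"}, noerrors,
	wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
```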

Organization
---
The PR starts with `rm -rf kubernetes/pkg/controller/persistentvolume`. I find it easier to read when I see only the new controller, without pieces of the old one scattered around.
[`types.go` from the old controller is reused to speed up matching a bit; the code looks solid and has 95% unit test coverage.]

I tried to split the PR into smaller patches; let me know what you think.

~~TODO~~
--

* ~~Missing: provisioning, recycling~~.
* ~~Fix integration tests~~
* ~~Fix e2e tests~~

@kubernetes/sig-storage


Fixes #15632
This commit is contained in:
k8s-merge-robot 2016-05-19 03:06:46 -07:00
commit c63ac4e664
34 changed files with 4056 additions and 3019 deletions


@@ -373,38 +373,23 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
}
}
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
pvclaimBinder.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
volumeController := persistentvolumecontroller.NewPersistentVolumeController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod.Duration,
int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
provisioner,
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
s.ClusterName,
nil, nil, nil,
)
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
volumeController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
pvController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
go volume.NewAttachDetachController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, ResyncPeriod(s)()).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))


@@ -271,37 +271,23 @@ func (s *CMServer) Run(_ []string) error {
}
}
volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(
volumeController := persistentvolumecontroller.NewPersistentVolumeController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod.Duration,
)
pvclaimBinder.Run()
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PVClaimBinderSyncPeriod.Duration,
int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
provisioner,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
s.ClusterName,
nil,
nil,
nil,
)
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
pvController.Run()
}
volumeController.Run()
var rootCA []byte


@@ -340,7 +340,7 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
},
func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
c.FuzzNoCustom(pvc) // fuzz self without calling this function again
types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending}
types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost}
pvc.Status.Phase = types[c.Rand.Intn(len(types))]
},
func(s *api.NamespaceSpec, c fuzz.Continue) {


@@ -411,6 +411,10 @@ const (
ClaimPending PersistentVolumeClaimPhase = "Pending"
// used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// Represents a host path mapped into a pod.


@@ -506,6 +506,10 @@ const (
ClaimPending PersistentVolumeClaimPhase = "Pending"
// used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// Represents a host path mapped into a pod.


@@ -0,0 +1,426 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Test single call to syncClaim and syncVolume methods.
// 1. Fill in the controller with initial data
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
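// For orientation, each controllerTest literal below fills the struct fields
// positionally. A sketch of the layout, inferred from how the tests are built
// and consumed in this commit (the names of the expected-state fields and the
// injected-error type are assumptions, not the authoritative definition in
// framework_test.go):
//
//	name            string                       // e.g. "1-1 - successful bind"
//	initialVolumes  []*api.PersistentVolume      // PVs loaded into the controller up front
//	expectedVolumes []*api.PersistentVolume      // PVs expected after the tested call
//	initialClaims   []*api.PersistentVolumeClaim // PVCs loaded into the controller up front
//	expectedClaims  []*api.PersistentVolumeClaim // PVCs expected after the tested call
//	expectedEvents  []string                     // e.g. "Warning ClaimLost"; noevents for none
//	errors          []reactorError               // errors injected into the fake API server; noerrors for none
//	test            testCall                     // testSyncClaim, testSyncVolume, or a custom function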
func TestSync(t *testing.T) {
tests := []controllerTest{
// [Unit test set 1] The user does not care which PV they get.
// Test the matching with no claim.Spec.VolumeName and with various
// volumes.
{
// syncClaim binds to a matching unbound volume.
"1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not do anything when there is no matching volume.
"1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim resets claim.Status to Pending when there is no
// matching volume.
"1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume",
[]*api.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
},
newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to a volume that points to it (by
// name), even though a smaller one is available.
"1-5 - prebound volume by name - success",
[]*api.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to a volume that points to it (by
// UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success",
[]*api.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID
"1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.Status is saved
"1-9 - complete bind after crash - PV status saved",
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PVC.VolumeName is saved
"10 - complete bind after crash - PVC bound",
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
// by user.
{
// syncClaim with claim pre-bound to a PV that does not exist
"2-1 - claim prebound to non-existing volume - noop",
novolumes,
novolumes,
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that does not exist.
// Check that the claim status is reset to Pending
"2-2 - claim prebound to non-existing volume - reset status",
novolumes,
novolumes,
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound),
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound. Check it gets bound and no annBoundByController is set.
"2-3 - claim prebound to unbound volume",
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending),
newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// claim with claim pre-bound to a PV that is pre-bound to the claim
// by name. Check it gets bound and no annBoundByController is set.
"2-4 - claim prebound to prebound volume by name",
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending),
newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is pre-bound to the
// claim by UID. Check it gets bound and no annBoundByController is
// set.
"2-5 - claim prebound to prebound volume by UID",
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending),
newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is bound to different
// claim. Check it's reset to Pending.
"2-6 - claim prebound to already bound volume",
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound),
newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound by controller to a PV that is bound to
// different claim. Check it throws an error.
"2-7 - claim bound by controller to already bound volume",
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController),
newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController),
noevents, noerrors, testSyncClaimError,
},
// [Unit test set 3] Syncing bound claim
{
// syncClaim with claim bound and its claim.Spec.VolumeName is
// removed. Check it's marked as Lost.
"3-1 - bound claim with missing VolumeName",
novolumes,
novolumes,
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to non-existing volume. Check it's
// marked as Lost.
"3-2 - bound claim with missing volume",
novolumes,
novolumes,
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to unbound volume. Check it's bound.
// Also check that Pending phase is set to Bound
"3-3 - bound claim with unbound volume",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to volume with missing (or different)
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
"3-4 - bound claim with prebound volume",
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to bound volume. Check that the
// controller does not do anything. Also check that Pending phase is
// set to Bound
"3-5 - bound claim with bound volume",
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to a volume that is bound to different
// claim. Check that the claim is marked as lost.
// TODO: test that an event is emitted
"3-6 - bound claim with bound volume",
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
// [Unit test set 4] All syncVolume tests.
{
// syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with prebound pending volume. Check it's marked as
// Available.
"4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to missing claim.
// Check the volume gets Released
"4-3 - bound volume with missing claim",
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to claim with different UID.
// Check the volume gets Released.
"4-4 - volume bound to claim with different UID",
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by controller to unbound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by user to bound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to bound claim.
// Check that the volume is marked as Bound.
"4-6 - volume bound by to bound claim",
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to claim bound to
// another volume. Check that the volume is rolled back.
"4-7 - volume bound by controller to claim bound somewhere else",
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to claim bound to
// another volume. Check that the volume is marked as Available
// and its UID is reset.
"4-8 - volume bound by user to claim bound somewhere else",
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
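// In pseudo-Go, the driver (runMultisyncTests in framework_test.go) works
// roughly as sketched below; popChange, syncObject and syncAll are assumed
// helper names used here only for illustration:
//
//	test.test(ctrl, reactor, test)      // step 1: the initial tested call
//	for i := 0; i < callLimit; i++ {    // the call limit prevents endless loops
//	    if obj := reactor.popChange(); obj != nil {
//	        syncObject(ctrl, obj)       // step 2: a "volume/claim changed" event
//	        continue
//	    }
//	    if !syncAll(ctrl, reactor) {    // step 3: periodic sync of everything
//	        break                       // step 5: no more changes, compare results
//	    }                               // step 4: periodic sync changed something
//	}
//	evaluateTestResults(ctrl, reactor, test, t)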
func TestMultiSync(t *testing.T) {
tests := []controllerTest{
// Test simple binding
{
// syncClaim binds to a matching unbound volume.
"10-1 - successful bind",
newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// Two controllers bound two PVs to single claim. Test one of them
// wins and the second rolls back.
"10-2 - bind PV race",
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
},
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
}
runMultisyncTests(t, tests)
}

File diff suppressed because it is too large.


@@ -0,0 +1,390 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
vol "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// This file contains the controller base functionality, i.e. framework to
// process PV/PVC added/updated/deleted events. The real binding, provisioning,
// recycling and deleting is done in controller.go
// NewPersistentVolumeController creates a new PersistentVolumeController
func NewPersistentVolumeController(
kubeClient clientset.Interface,
syncPeriod time.Duration,
provisioner vol.ProvisionableVolumePlugin,
recyclers []vol.VolumePlugin,
cloud cloudprovider.Interface,
clusterName string,
volumeSource, claimSource cache.ListerWatcher,
eventRecorder record.EventRecorder,
) *PersistentVolumeController {
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"})
}
controller := &PersistentVolumeController{
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: make(map[string]bool),
cloud: cloud,
provisioner: provisioner,
clusterName: clusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
createProvisionedPVInterval: createProvisionedPVInterval,
}
controller.recyclePluginMgr.InitPlugins(recyclers, controller)
if controller.provisioner != nil {
if err := controller.provisioner.Init(controller); err != nil {
glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err)
}
}
if volumeSource == nil {
volumeSource = &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().PersistentVolumes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Core().PersistentVolumes().Watch(options)
},
}
}
if claimSource == nil {
claimSource = &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
},
}
}
controller.volumes.store, controller.volumeController = framework.NewIndexerInformer(
volumeSource,
&api.PersistentVolume{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: controller.addVolume,
UpdateFunc: controller.updateVolume,
DeleteFunc: controller.deleteVolume,
},
cache.Indexers{"accessmodes": accessModesIndexFunc},
)
controller.claims, controller.claimController = framework.NewInformer(
claimSource,
&api.PersistentVolumeClaim{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: controller.addClaim,
UpdateFunc: controller.updateClaim,
DeleteFunc: controller.deleteClaim,
},
)
return controller
}
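// Example wiring, matching how kube-controller-manager constructs this
// controller earlier in this commit; passing nil for volumeSource,
// claimSource and eventRecorder selects the defaults created above:
//
//	ctrl := persistentvolumecontroller.NewPersistentVolumeController(
//	    clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
//	    s.PVClaimBinderSyncPeriod.Duration,
//	    provisioner,
//	    ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
//	    cloud,
//	    s.ClusterName,
//	    nil, nil, nil,
//	)
//	ctrl.Run()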
// addVolume is callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
if !ctrl.isFullySynced() {
return
}
pv, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("expected PersistentVolume but handler received %+v", obj)
return
}
if err := ctrl.syncVolume(pv); err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
} else {
glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
}
}
}
// updateVolume is callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
if !ctrl.isFullySynced() {
return
}
newVolume, ok := newObj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
return
}
if err := ctrl.syncVolume(newVolume); err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
} else {
glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
}
}
}
// deleteVolume is callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
if !ctrl.isFullySynced() {
return
}
var volume *api.PersistentVolume
var ok bool
volume, ok = obj.(*api.PersistentVolume)
if !ok {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
volume, ok = unknown.Obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj)
return
}
} else {
glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj)
return
}
}
if !ok || volume == nil || volume.Spec.ClaimRef == nil {
return
}
if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists {
if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil {
// sync the claim when its volume is deleted. Explicitly syncing the
// claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status.
err := ctrl.syncClaim(claim)
if err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the
// controller recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err)
} else {
glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", claimToClaimKey(claim), err)
}
}
} else {
glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj)
}
}
}
// addClaim is callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
if !ctrl.isFullySynced() {
return
}
claim, ok := obj.(*api.PersistentVolumeClaim)
if !ok {
glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj)
return
}
if err := ctrl.syncClaim(claim); err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err)
} else {
glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err)
}
}
}
// updateClaim is callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
if !ctrl.isFullySynced() {
return
}
newClaim, ok := newObj.(*api.PersistentVolumeClaim)
if !ok {
glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj)
return
}
if err := ctrl.syncClaim(newClaim); err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
} else {
glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
}
}
}
// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
if !ctrl.isFullySynced() {
return
}
var volume *api.PersistentVolume
var claim *api.PersistentVolumeClaim
var ok bool
claim, ok = obj.(*api.PersistentVolumeClaim)
if !ok {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
claim, ok = unknown.Obj.(*api.PersistentVolumeClaim)
if !ok {
glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj)
return
}
} else {
glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj)
return
}
}
if !ok || claim == nil {
return
}
if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists {
if volume, ok = pvObj.(*api.PersistentVolume); ok {
// sync the volume when its claim is deleted. Explicitly syncing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
if volume != nil {
err := ctrl.syncVolume(volume)
if err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the
// controller recovers from it easily.
glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err)
} else {
glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err)
}
}
}
} else {
glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj)
}
}
}
// Run starts all of this controller's control loops
func (ctrl *PersistentVolumeController) Run() {
glog.V(4).Infof("starting PersistentVolumeController")
if ctrl.volumeControllerStopCh == nil {
ctrl.volumeControllerStopCh = make(chan struct{})
go ctrl.volumeController.Run(ctrl.volumeControllerStopCh)
}
if ctrl.claimControllerStopCh == nil {
ctrl.claimControllerStopCh = make(chan struct{})
go ctrl.claimController.Run(ctrl.claimControllerStopCh)
}
}
// Stop gracefully shuts down this controller
func (ctrl *PersistentVolumeController) Stop() {
glog.V(4).Infof("stopping PersistentVolumeController")
close(ctrl.volumeControllerStopCh)
close(ctrl.claimControllerStopCh)
}
// isFullySynced returns true if both the volume and claim caches are fully
// loaded after startup.
// We do not want to process events before the caches are fully loaded; e.g.,
// we might recycle/delete PVs that don't have a corresponding claim in the
// cache yet.
func (ctrl *PersistentVolumeController) isFullySynced() bool {
return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced()
}
// Stateless functions
func hasAnnotation(obj api.ObjectMeta, ann string) bool {
_, found := obj.Annotations[ann]
return found
}
func setAnnotation(obj *api.ObjectMeta, ann string, value string) {
if obj.Annotations == nil {
obj.Annotations = make(map[string]string)
}
obj.Annotations[ann] = value
}
func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string {
bound := hasAnnotation(claim.ObjectMeta, annBindCompleted)
boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController)
return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
}
func getVolumeStatusForLogging(volume *api.PersistentVolume) string {
boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController)
claimName := ""
if volume.Spec.ClaimRef != nil {
claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
}
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
}
// isVolumeBoundToClaim returns true if the given volume is pre-bound or bound
// to the given claim. Both claim.Name and claim.Namespace must match the
// volume's ClaimRef. If claim.UID is present in volume.Spec.ClaimRef, it must
// match too.
func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool {
if volume.Spec.ClaimRef == nil {
return false
}
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
return false
}
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
return false
}
return true
}


@@ -0,0 +1,161 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
)
// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now'; therefore these tests
// can't reliably simulate periodic sync of volumes/claims - it would be
// either very timing-sensitive or slow (waiting for the real periodic sync).
func TestControllerSync(t *testing.T) {
tests := []controllerTest{
// [Unit test set 5] - controller tests.
// We test the controller as if
// it was connected to real API server, i.e. we call add/update/delete
// Claim/Volume methods. Also, all changes to volumes and claims are
// sent to add/update/delete Claim/Volume as real controller would do.
{
// addVolume gets a new volume. Check it's marked as Available and
// that it's not bound to any claim - we bind volumes on periodic
// syncClaim, not on addVolume.
"5-1 - addVolume",
novolumes, /* added in testCall below */
newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)
reactor.volumes[volume.Name] = volume
reactor.volumeSource.Add(volume)
return nil
},
},
{
// addClaim gets a new claim. Check it's bound to a volume.
"5-2 - complete bind",
newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
noclaims, /* added in testCall below */
newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending)
reactor.claims[claim.Name] = claim
reactor.claimSource.Add(claim)
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released.
"5-3 - delete claim",
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted),
noclaims,
noevents, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.claims.List()[0]
claim := obj.(*api.PersistentVolumeClaim)
// Remove the claim from list of resulting claims.
delete(reactor.claims, claim.Name)
// Poke the controller with deletion event. Cloned claim is
// needed to prevent races (and we would get a clone from etcd
// too).
clone, _ := conversion.NewCloner().DeepCopy(claim)
claimClone := clone.(*api.PersistentVolumeClaim)
reactor.claimSource.Delete(claimClone)
return nil
},
},
{
// deleteVolume with a bound volume. Check the claim is Lost.
"5-4 - delete volume",
newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
novolumes,
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.volumes.store.List()[0]
volume := obj.(*api.PersistentVolume)
// Remove the volume from list of resulting volumes.
delete(reactor.volumes, volume.Name)
// Poke the controller with deletion event. Cloned volume is
// needed to prevent races (and we would get a clone from etcd
// too).
clone, _ := conversion.NewCloner().DeepCopy(volume)
volumeClone := clone.(*api.PersistentVolume)
reactor.volumeSource.Delete(volumeClone)
return nil
},
},
}
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
volumeSource := framework.NewFakeControllerSource()
claimSource := framework.NewFakeControllerSource()
ctrl := newTestController(client, volumeSource, claimSource)
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
for _, claim := range test.initialClaims {
claimSource.Add(claim)
reactor.claims[claim.Name] = claim
}
for _, volume := range test.initialVolumes {
volumeSource.Add(volume)
reactor.volumes[volume.Name] = volume
}
// Start the controller
defer ctrl.Stop()
go ctrl.Run()
// Wait for the controller to pass initial sync.
for !ctrl.isFullySynced() {
time.Sleep(10 * time.Millisecond)
}
// Call the tested function
err := test.test(ctrl, reactor, test)
if err != nil {
t.Errorf("Test %q initial test call failed: %v", test.name, err)
}
reactor.waitTest()
evaluateTestResults(ctrl, reactor, test, t)
}
}


@@ -0,0 +1,169 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Test single call to syncVolume, expecting deleting to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestDeleteSync(t *testing.T) {
tests := []controllerTest{
{
// delete volume bound by controller
"8-1 - successful delete",
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete volume bound by user
"8-2 - successful delete with prebound volume",
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete failure - plugin not found
"8-3 - plugin not found",
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
},
{
// delete failure - newDeleter returns error
"8-4 - newDeleter returns error",
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume),
},
{
// delete failure - delete() returns error
"8-5 - delete returns error",
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
},
{
// delete success(?) - volume is deleted before doDelete() starts
"8-6 - volume is deleted before deleting",
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before delete operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume8-6")
reactor.lock.Unlock()
}),
},
{
// delete success(?) - volume is bound just at the time doDelete()
// starts. This simulates "volume no longer needs recycling,
// skipping".
"8-7 - volume is bound before deleting",
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
// Bind the volume to the resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound)
reactor.claims[claim.Name] = claim
ctrl.claims.Add(claim)
volume := reactor.volumes["volume8-7"]
volume.Status.Phase = api.VolumeBound
}),
},
{
// delete success - volume bound by user is deleted, while a new
// claim is created with another UID.
"8-9 - prebound volume is deleted while the claim exists",
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete),
novolumes,
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
func TestDeleteMultiSync(t *testing.T) {
tests := []controllerTest{
{
// delete failure - delete returns error. The controller should
// try again.
"9-1 - delete returns error",
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests)
}

File diff suppressed because it is too large.


@@ -24,34 +24,9 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
)
const (
// A PVClaim can request a quality of service tier by adding this annotation. The value of the annotation
// is arbitrary. The values are pre-defined by a cluster admin and known to users when requesting a QoS.
// For example tiers might be gold, silver, and tin and the admin configures what that means for each volume plugin that can provision a volume.
// Values in the alpha version of this feature are not meaningful, but will be in the full version of this feature.
qosProvisioningKey = "volume.alpha.kubernetes.io/storage-class"
// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD)
// with the namespace of the persistent volume claim used to create this volume.
cloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace"
// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD)
// with the name of the persistent volume claim used to create this volume.
cloudVolumeCreatedForClaimNameTag = "kubernetes.io/created-for/pvc/name"
// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD)
// with the name of the appropriate Kubernetes persistent volume.
cloudVolumeCreatedForVolumeNameTag = "kubernetes.io/created-for/pv/name"
)
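These tag constants are only declared here. As an illustration of their intended use, a provisioner could attach them to the cloud volume it creates so the volume can be traced back to its claim and PV; buildCloudTags is hypothetical, not part of this package:

func buildCloudTags(claim *api.PersistentVolumeClaim, pvName string) map[string]string {
    return map[string]string{
        cloudVolumeCreatedForClaimNamespaceTag: claim.Namespace, // who asked for the volume
        cloudVolumeCreatedForClaimNameTag:      claim.Name,
        cloudVolumeCreatedForVolumeNameTag:     pvName, // which PV object fronts it
    }
}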
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity.
type persistentVolumeOrderedIndex struct {
cache.Indexer
store cache.Indexer
}
var _ cache.Store = &persistentVolumeOrderedIndex{} // persistentVolumeOrderedIndex is a Store
func NewPersistentVolumeOrderedIndex() *persistentVolumeOrderedIndex {
return &persistentVolumeOrderedIndex{
cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc}),
}
}
// accessModesIndexFunc is an indexing function that returns a persistent volume's AccessModes as a string
@ -63,15 +38,15 @@ func accessModesIndexFunc(obj interface{}) ([]string, error) {
return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj)
}
// ListByAccessModes returns all volumes with the given set of AccessModeTypes *in order* of their storage capacity (low to high)
func (pvIndex *persistentVolumeOrderedIndex) ListByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) {
// listByAccessModes returns all volumes with the given set of AccessModeTypes *in order* of their storage capacity (low to high)
func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) {
pv := &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
AccessModes: modes,
},
}
objs, err := pvIndex.Index("accessmodes", pv)
objs, err := pvIndex.store.Index("accessmodes", pv)
if err != nil {
return nil, err
}
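The round-trip through the underlying cache.Indexer can be hard to see in the diff noise above. A minimal sketch, using only what is visible here (the "accessmodes" index key, accessModesIndexFunc, and a template volume carrying the requested modes):

func exampleIndexRoundTrip(pv *api.PersistentVolume) ([]interface{}, error) {
    idx := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})
    if err := idx.Add(pv); err != nil {
        return nil, err
    }
    // Query with a template volume, exactly as listByAccessModes does above;
    // the indexer returns every volume whose modes map to the same index key.
    template := &api.PersistentVolume{
        Spec: api.PersistentVolumeSpec{
            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
        },
    }
    return idx.Index("accessmodes", template)
}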
@ -101,7 +76,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
for _, modes := range allPossibleModes {
volumes, err := pvIndex.ListByAccessModes(modes)
volumes, err := pvIndex.listByAccessModes(modes)
if err != nil {
return nil, err
}
@ -117,16 +92,20 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo
continue
}
if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace && claim.UID == volume.Spec.ClaimRef.UID {
// exact match! No search required.
if isVolumeBoundToClaim(volume, claim) {
// Exact match! No search required.
return volume, nil
}
}
// a claim requesting provisioning will have an exact match pre-bound to the claim.
// no need to search through unbound volumes. The matching volume will be created by the provisioner
// and will match above when the claim is re-processed by the binder.
if keyExists(qosProvisioningKey, claim.Annotations) {
// We want to provision volumes if the annotation is set even if there
// is a matching PV. Therefore, do not look for an available PV and let
// a new volume be provisioned.
//
// When provisioner creates a new PV to this claim, an exact match
// pre-bound to the claim will be found by the checks above during
// subsequent claim sync.
if hasAnnotation(claim.ObjectMeta, annClass) {
return nil, nil
}
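isVolumeBoundToClaim replaces the inline name/namespace/UID comparison deleted above. Its body is outside this hunk; a sketch consistent with the deleted condition (the nil guard is an assumption):

func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool {
    if volume.Spec.ClaimRef == nil {
        return false
    }
    // Same triple check as the deleted inline condition: name, namespace, UID.
    return claim.Name == volume.Spec.ClaimRef.Name &&
        claim.Namespace == volume.Spec.ClaimRef.Namespace &&
        claim.UID == volume.Spec.ClaimRef.UID
}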
@ -213,7 +192,7 @@ func matchStorageCapacity(pvA, pvB *api.PersistentVolume) bool {
//
func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []api.PersistentVolumeAccessMode) [][]api.PersistentVolumeAccessMode {
matchedModes := [][]api.PersistentVolumeAccessMode{}
keys := pvIndex.Indexer.ListIndexFuncValues("accessmodes")
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
for _, key := range keys {
indexedModes := api.GetAccessModesFromString(key)
if containedInAll(indexedModes, requestedModes) {
@ -265,3 +244,7 @@ func (c byAccessModes) Len() int {
func claimToClaimKey(claim *api.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
func claimrefToClaimKey(claimref *api.ObjectReference) string {
return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name)
}
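containedInAll, used by allPossibleMatchingAccessModes above, is also defined outside this hunk. Assuming it answers "does the indexed mode set cover every requested mode?", a sketch:

func containedInAll(indexedModes []api.PersistentVolumeAccessMode, requestedModes []api.PersistentVolumeAccessMode) bool {
    for _, requested := range requestedModes {
        found := false
        for _, indexed := range indexedModes {
            if indexed == requested {
                found = true
                break
            }
        }
        if !found {
            return false // an index key missing any requested mode cannot match
        }
    }
    return true
}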

View File

@ -22,12 +22,17 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/cache"
)
func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex {
return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})}
}
func TestMatchVolume(t *testing.T) {
volList := NewPersistentVolumeOrderedIndex()
volList := newPersistentVolumeOrderedIndex()
for _, pv := range createTestVolumes() {
volList.Add(pv)
volList.store.Add(pv)
}
scenarios := map[string]struct {
@ -122,7 +127,7 @@ func TestMatchVolume(t *testing.T) {
}
func TestMatchingWithBoundVolumes(t *testing.T) {
volumeIndex := NewPersistentVolumeOrderedIndex()
volumeIndex := newPersistentVolumeOrderedIndex()
// two similar volumes, one is bound
pv1 := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
@ -158,8 +163,8 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
},
}
volumeIndex.Add(pv1)
volumeIndex.Add(pv2)
volumeIndex.store.Add(pv1)
volumeIndex.store.Add(pv2)
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
@ -189,12 +194,12 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
}
func TestSort(t *testing.T) {
volList := NewPersistentVolumeOrderedIndex()
volList := newPersistentVolumeOrderedIndex()
for _, pv := range createTestVolumes() {
volList.Add(pv)
volList.store.Add(pv)
}
volumes, err := volList.ListByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany})
volumes, err := volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany})
if err != nil {
t.Error("Unexpected error retrieving volumes by access modes:", err)
}
@ -205,7 +210,7 @@ func TestSort(t *testing.T) {
}
}
volumes, err = volList.ListByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany})
volumes, err = volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany})
if err != nil {
t.Error("Unexpected error retrieving volumes by access modes:", err)
}
@ -218,9 +223,9 @@ func TestSort(t *testing.T) {
}
func TestAllPossibleAccessModes(t *testing.T) {
index := NewPersistentVolumeOrderedIndex()
index := newPersistentVolumeOrderedIndex()
for _, pv := range createTestVolumes() {
index.Add(pv)
index.store.Add(pv)
}
// the mock PVs created contain 2 types of access modes: RWO+ROX and RWO+ROX+RWX
@ -292,10 +297,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
},
}
index := NewPersistentVolumeOrderedIndex()
index.Add(gce)
index.Add(ebs)
index.Add(nfs)
index := newPersistentVolumeOrderedIndex()
index.store.Add(gce)
index.store.Add(ebs)
index.store.Add(nfs)
volume, _ := index.findBestMatchForClaim(claim)
if volume.Name != ebs.Name {
@ -521,10 +526,10 @@ func TestFindingPreboundVolumes(t *testing.T) {
pv5 := testVolume("pv5", "5Gi")
pv8 := testVolume("pv8", "8Gi")
index := NewPersistentVolumeOrderedIndex()
index.Add(pv1)
index.Add(pv5)
index.Add(pv8)
index := newPersistentVolumeOrderedIndex()
index.store.Add(pv1)
index.store.Add(pv5)
index.store.Add(pv8)
// expected exact match on size
volume, _ := index.findBestMatchForClaim(claim)

View File

@ -1,530 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// PersistentVolumeClaimBinder is a controller that synchronizes PersistentVolumeClaims.
type PersistentVolumeClaimBinder struct {
volumeIndex *persistentVolumeOrderedIndex
volumeController *framework.Controller
claimController *framework.Controller
client binderClient
stopChannels map[string]chan struct{}
lock sync.RWMutex
}
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("pv_claim_binder_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
volumeIndex := NewPersistentVolumeOrderedIndex()
binderClient := NewBinderClient(kubeClient)
binder := &PersistentVolumeClaimBinder{
volumeIndex: volumeIndex,
client: binderClient,
}
_, volumeController := framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().PersistentVolumes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Core().PersistentVolumes().Watch(options)
},
},
&api.PersistentVolume{},
// TODO: Can we have a much longer period here?
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: binder.addVolume,
UpdateFunc: binder.updateVolume,
DeleteFunc: binder.deleteVolume,
},
)
_, claimController := framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
},
},
&api.PersistentVolumeClaim{},
// TODO: Can we have a much longer period here?
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: binder.addClaim,
UpdateFunc: binder.updateClaim,
DeleteFunc: binder.deleteClaim,
},
)
binder.claimController = claimController
binder.volumeController = volumeController
return binder
}
func (binder *PersistentVolumeClaimBinder) addVolume(obj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
pv, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Expected PersistentVolume but handler received %+v", obj)
return
}
if err := syncVolume(binder.volumeIndex, binder.client, pv); err != nil {
glog.Errorf("PVClaimBinder could not add volume %s: %+v", pv.Name, err)
}
}
func (binder *PersistentVolumeClaimBinder) updateVolume(oldObj, newObj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
newVolume, ok := newObj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
return
}
if err := binder.volumeIndex.Update(newVolume); err != nil {
glog.Errorf("Error updating volume %s in index: %v", newVolume.Name, err)
return
}
if err := syncVolume(binder.volumeIndex, binder.client, newVolume); err != nil {
glog.Errorf("PVClaimBinder could not update volume %s: %+v", newVolume.Name, err)
}
}
func (binder *PersistentVolumeClaimBinder) deleteVolume(obj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
volume, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Expected PersistentVolume but handler received %+v", obj)
return
}
if err := binder.volumeIndex.Delete(volume); err != nil {
glog.Errorf("Error deleting volume %s from index: %v", volume.Name, err)
}
}
func (binder *PersistentVolumeClaimBinder) addClaim(obj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
claim, ok := obj.(*api.PersistentVolumeClaim)
if !ok {
glog.Errorf("Expected PersistentVolumeClaim but handler received %+v", obj)
return
}
if err := syncClaim(binder.volumeIndex, binder.client, claim); err != nil {
glog.Errorf("PVClaimBinder could not add claim %s: %+v", claim.Name, err)
}
}
func (binder *PersistentVolumeClaimBinder) updateClaim(oldObj, newObj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
newClaim, ok := newObj.(*api.PersistentVolumeClaim)
if !ok {
glog.Errorf("Expected PersistentVolumeClaim but handler received %+v", newObj)
return
}
if err := syncClaim(binder.volumeIndex, binder.client, newClaim); err != nil {
glog.Errorf("PVClaimBinder could not update claim %s: %+v", newClaim.Name, err)
}
}
func (binder *PersistentVolumeClaimBinder) deleteClaim(obj interface{}) {
binder.lock.Lock()
defer binder.lock.Unlock()
var volume *api.PersistentVolume
if pvc, ok := obj.(*api.PersistentVolumeClaim); ok {
if pvObj, exists, _ := binder.volumeIndex.GetByKey(pvc.Spec.VolumeName); exists {
if pv, ok := pvObj.(*api.PersistentVolume); ok {
volume = pv
}
}
}
if unk, ok := obj.(cache.DeletedFinalStateUnknown); ok && unk.Obj != nil {
if pv, ok := unk.Obj.(*api.PersistentVolume); ok {
volume = pv
}
}
// sync the volume when its claim is deleted. Explicitly sync'ing the volume here in response to
// claim deletion prevents the volume from waiting until the next sync period for its Release.
if volume != nil {
err := syncVolume(binder.volumeIndex, binder.client, volume)
if err != nil {
glog.Errorf("PVClaimBinder could not update volume %s from deleteClaim handler: %+v", volume.Name, err)
}
}
}
func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) {
glog.V(5).Infof("Synchronizing PersistentVolume[%s], current phase: %s\n", volume.Name, volume.Status.Phase)
// The PV may have been modified by a parallel call to syncVolume; load
// the current version.
newPv, err := binderClient.GetPersistentVolume(volume.Name)
if err != nil {
return fmt.Errorf("Cannot reload volume %s: %v", volume.Name, err)
}
volume = newPv
// volumes can be in one of the following states:
//
// VolumePending -- default value -- not bound to a claim and not yet processed through this controller.
// VolumeAvailable -- not bound to a claim, but processed at least once and found in this controller's volumeIndex.
// VolumeBound -- bound to a claim because volume.Spec.ClaimRef != nil. Claim status may not be correct.
// VolumeReleased -- volume.Spec.ClaimRef != nil but the claim has been deleted by the user.
// VolumeFailed -- volume.Spec.ClaimRef != nil and the volume failed processing in the recycler
// (these phases are restated as a quick-reference table after this function)
currentPhase := volume.Status.Phase
nextPhase := currentPhase
// Always store the newest volume state in local cache.
_, exists, err := volumeIndex.Get(volume)
if err != nil {
return err
}
if !exists {
volumeIndex.Add(volume)
} else {
volumeIndex.Update(volume)
}
if isBeingProvisioned(volume) {
glog.V(4).Infof("Skipping PersistentVolume[%s], waiting for provisioning to finish", volume.Name)
return nil
}
switch currentPhase {
case api.VolumePending:
// 4 possible states:
// 1. ClaimRef != nil, Claim exists, Claim UID == ClaimRef UID: Prebound to claim. Make volume available for binding (it will match PVC).
// 2. ClaimRef != nil, Claim exists, Claim UID != ClaimRef UID: Recently recycled. Remove bind. Make volume available for new claim.
// 3. ClaimRef != nil, Claim !exists: Recently recycled. Remove bind. Make volume available for new claim.
// 4. ClaimRef == nil: Neither recycled nor prebound. Make volume available for binding.
nextPhase = api.VolumeAvailable
if volume.Spec.ClaimRef != nil {
claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
switch {
case err != nil && !errors.IsNotFound(err):
return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
case errors.IsNotFound(err) || (claim != nil && claim.UID != volume.Spec.ClaimRef.UID):
glog.V(5).Infof("PersistentVolume[%s] has a claim ref to a claim which does not exist", volume.Name)
if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle {
// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
// Removing the ClaimRef unbinds the volume.
clone, err := conversion.NewCloner().DeepCopy(volume)
if err != nil {
return fmt.Errorf("Error cloning pv: %v", err)
}
volumeClone, ok := clone.(*api.PersistentVolume)
if !ok {
return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
}
glog.V(5).Infof("PersistentVolume[%s] is recently recycled; remove claimRef.", volume.Name)
volumeClone.Spec.ClaimRef = nil
if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
return fmt.Errorf("Unexpected error saving PersistentVolume: %+v", err)
} else {
volume = updatedVolume
volumeIndex.Update(volume)
}
} else {
// Pending volumes that have a ClaimRef whose claim is missing and that were
// not recycled must have been freshly provisioned, with the claim deleted
// during provisioning. Mark the volume as Released; it will be deleted.
nextPhase = api.VolumeReleased
}
}
// Dynamically provisioned claims remain Pending until their volume is completely provisioned.
// The provisioner updates the PV and triggers this update for the volume. Explicitly sync'ing
// the claim here prevents the need to wait until the next sync period when the claim would normally
// advance to Bound phase. Otherwise, the maximum wait time for the claim to be Bound is the default sync period.
if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) {
syncClaim(volumeIndex, binderClient, claim)
}
}
glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)
// available volumes await a claim
case api.VolumeAvailable:
if volume.Spec.ClaimRef != nil {
_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
if err == nil {
// change of phase will trigger an update event with the newly bound volume
glog.V(5).Infof("PersistentVolume[%s] is now bound\n", volume.Name)
nextPhase = api.VolumeBound
} else {
if errors.IsNotFound(err) {
nextPhase = api.VolumeReleased
}
}
}
//bound volumes require verification of their bound claims
case api.VolumeBound:
if volume.Spec.ClaimRef == nil {
return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
} else {
claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
// A volume is Released when its bound claim cannot be found in the API server.
// A claim by the same name can be found if deleted and recreated before this controller can release
// the volume from the original claim, so a UID check is necessary.
if err != nil {
if errors.IsNotFound(err) {
nextPhase = api.VolumeReleased
} else {
return err
}
} else if claim != nil && claim.UID != volume.Spec.ClaimRef.UID {
nextPhase = api.VolumeReleased
}
}
// released volumes require recycling
case api.VolumeReleased:
if volume.Spec.ClaimRef == nil {
return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
} else {
// another process is watching for released volumes.
// PersistentVolumeReclaimPolicy is set per PersistentVolume
// Recycle - sets the PV to Pending and back under this controller's management
// Delete - delete events are handled by this controller's watch. PVs are removed from the index.
}
// volumes are removed by processes external to this binder and must be removed from the cluster
case api.VolumeFailed:
if volume.Spec.ClaimRef == nil {
return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
} else {
glog.V(5).Infof("PersistentVolume[%s] previously failed recycling. Skipping.\n", volume.Name)
}
}
if currentPhase != nextPhase {
volume.Status.Phase = nextPhase
// a change in state will trigger another update through this controller.
// each pass through this controller evaluates current phase and decides whether or not to change to the next phase
glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", volume.Name, currentPhase, nextPhase)
volume, err := binderClient.UpdatePersistentVolumeStatus(volume)
if err != nil {
// Rollback to previous phase
volume.Status.Phase = currentPhase
}
volumeIndex.Update(volume)
}
return nil
}
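The phase comment near the top of the deleted syncVolume is the key to reading the switch above. Restated as data, purely as a reading aid (this map is not part of the original file):

var volumePhaseMeaning = map[api.PersistentVolumePhase]string{
    api.VolumePending:   "default value; not yet processed, may carry a ClaimRef from pre-binding or recycling",
    api.VolumeAvailable: "indexed and waiting for a claim",
    api.VolumeBound:     "Spec.ClaimRef set; the claim's UID must still be verified",
    api.VolumeReleased:  "Spec.ClaimRef set, but the claim was deleted by the user",
    api.VolumeFailed:    "recycling failed; skipped until an external process intervenes",
}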
func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, claim *api.PersistentVolumeClaim) (err error) {
glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s] for binding", claim.Name)
// The claim may have been modified by a parallel call to syncClaim; load
// the current version.
newClaim, err := binderClient.GetPersistentVolumeClaim(claim.Namespace, claim.Name)
if err != nil {
return fmt.Errorf("Cannot reload claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
claim = newClaim
switch claim.Status.Phase {
case api.ClaimPending:
// claims w/ a storage-class annotation for provisioning will *only* match volumes with a ClaimRef to the claim.
volume, err := volumeIndex.findBestMatchForClaim(claim)
if err != nil {
return err
}
if volume == nil {
glog.V(5).Infof("A volume match does not exist for persistent claim: %s", claim.Name)
return nil
}
if isBeingProvisioned(volume) {
glog.V(5).Infof("PersistentVolume[%s] for PersistentVolumeClaim[%s/%s] is still being provisioned.", volume.Name, claim.Namespace, claim.Name)
return nil
}
claimRef, err := api.GetReference(claim)
if err != nil {
return fmt.Errorf("Unexpected error getting claim reference: %v\n", err)
}
// Make a binding reference to the claim by persisting claimRef on the volume.
// The local cache must be updated with the new bind to prevent subsequent
// claims from binding to the volume.
if volume.Spec.ClaimRef == nil {
clone, err := conversion.NewCloner().DeepCopy(volume)
if err != nil {
return fmt.Errorf("Error cloning pv: %v", err)
}
volumeClone, ok := clone.(*api.PersistentVolume)
if !ok {
return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
}
volumeClone.Spec.ClaimRef = claimRef
if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
} else {
volume = updatedVolume
volumeIndex.Update(updatedVolume)
}
}
// the bind is persisted on the volume above and will always match the claim in a search.
// claim would remain Pending if the update fails, so processing this state is idempotent.
// this only needs to be processed once.
if claim.Spec.VolumeName != volume.Name {
claim.Spec.VolumeName = volume.Name
claim, err = binderClient.UpdatePersistentVolumeClaim(claim)
if err != nil {
return fmt.Errorf("Error updating claim with VolumeName %s: %+v\n", volume.Name, err)
}
}
claim.Status.Phase = api.ClaimBound
claim.Status.AccessModes = volume.Spec.AccessModes
claim.Status.Capacity = volume.Spec.Capacity
_, err = binderClient.UpdatePersistentVolumeClaimStatus(claim)
if err != nil {
return fmt.Errorf("Unexpected error saving claim status: %+v", err)
}
case api.ClaimBound:
// no-op. Claim is bound, values from PV are set. PVCs are technically mutable in the API server
// and we don't want to handle those changes at this time.
default:
return fmt.Errorf("Unknown state for PVC: %#v", claim)
}
glog.V(5).Infof("PersistentVolumeClaim[%s] is bound\n", claim.Name)
return nil
}
func isBeingProvisioned(volume *api.PersistentVolume) bool {
value, found := volume.Annotations[pvProvisioningRequiredAnnotationKey]
if found && value != pvProvisioningCompletedAnnotationValue {
return true
}
return false
}
// Run starts all of this binder's control loops
func (controller *PersistentVolumeClaimBinder) Run() {
glog.V(5).Infof("Starting PersistentVolumeClaimBinder\n")
if controller.stopChannels == nil {
controller.stopChannels = make(map[string]chan struct{})
}
if _, exists := controller.stopChannels["volumes"]; !exists {
controller.stopChannels["volumes"] = make(chan struct{})
go controller.volumeController.Run(controller.stopChannels["volumes"])
}
if _, exists := controller.stopChannels["claims"]; !exists {
controller.stopChannels["claims"] = make(chan struct{})
go controller.claimController.Run(controller.stopChannels["claims"])
}
}
// Stop gracefully shuts down this binder
func (controller *PersistentVolumeClaimBinder) Stop() {
glog.V(5).Infof("Stopping PersistentVolumeClaimBinder\n")
for name, stopChan := range controller.stopChannels {
close(stopChan)
delete(controller.stopChannels, name)
}
}
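A minimal lifecycle sketch for the deleted binder, matching how TestRunStop exercises it later in this diff (the parameter stands in for any clientset.Interface):

func runBinderBriefly(c clientset.Interface) {
    binder := NewPersistentVolumeClaimBinder(c, 10*time.Second)
    binder.Run()        // starts the volume and claim informer loops
    defer binder.Stop() // closes both stop channels for a graceful shutdown
}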
// binderClient abstracts access to PVs and PVCs
type binderClient interface {
GetPersistentVolume(name string) (*api.PersistentVolume, error)
UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
DeletePersistentVolume(volume *api.PersistentVolume) error
UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error)
UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
}
func NewBinderClient(c clientset.Interface) binderClient {
return &realBinderClient{c}
}
type realBinderClient struct {
client clientset.Interface
}
func (c *realBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Get(name)
}
func (c *realBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Update(volume)
}
func (c *realBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
}
func (c *realBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().UpdateStatus(volume)
}
func (c *realBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(namespace).Get(name)
}
func (c *realBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim)
}
func (c *realBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
}

View File

@ -1,732 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"os"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/types"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/host_path"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestRunStop(t *testing.T) {
clientset := fake.NewSimpleClientset()
binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)
if len(binder.stopChannels) != 0 {
t.Errorf("Non-running binder should not have any stopChannels. Got %v", len(binder.stopChannels))
}
binder.Run()
if len(binder.stopChannels) != 2 {
t.Errorf("Running binder should have exactly 2 stopChannels. Got %v", len(binder.stopChannels))
}
binder.Stop()
if len(binder.stopChannels) != 0 {
t.Errorf("Non-running binder should not have any stopChannels. Got %v", len(binder.stopChannels))
}
}
func TestClaimRace(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("claimbinder-test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
c1 := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "c1",
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
},
},
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimPending,
},
}
c1.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
c2 := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "c2",
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
},
},
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimPending,
},
}
c2.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
v := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: fmt.Sprintf("%s/data01", tmpDir),
},
},
},
Status: api.PersistentVolumeStatus{
Phase: api.VolumePending,
},
}
volumeIndex := NewPersistentVolumeOrderedIndex()
mockClient := &mockBinderClient{}
mockClient.volume = v
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
// adds the volume to the index, making the volume available
syncVolume(volumeIndex, mockClient, v)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
if _, exists, _ := volumeIndex.Get(v); !exists {
t.Errorf("Expected to find volume in index but it did not exist")
}
// add the claim to fake API server
mockClient.UpdatePersistentVolumeClaim(c1)
// an initial sync for a claim matches the volume
err = syncClaim(volumeIndex, mockClient, c1)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if c1.Status.Phase != api.ClaimBound {
t.Errorf("Expected phase %s but got %s", api.ClaimBound, c1.Status.Phase)
}
// before the volume gets updated w/ claimRef, a 2nd claim can attempt to bind and find the same volume
// add the 2nd claim to fake API server
mockClient.UpdatePersistentVolumeClaim(c2)
err = syncClaim(volumeIndex, mockClient, c2)
if err != nil {
t.Errorf("unexpected error for unmatched claim: %v", err)
}
if c2.Status.Phase != api.ClaimPending {
t.Errorf("Expected phase %s but got %s", api.ClaimPending, c2.Status.Phase)
}
}
func TestNewClaimWithSameNameAsOldClaim(t *testing.T) {
c1 := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "c1",
Namespace: "foo",
UID: "12345",
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
},
},
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
c1.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
v := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
Spec: api.PersistentVolumeSpec{
ClaimRef: &api.ObjectReference{
Name: c1.Name,
Namespace: c1.Namespace,
UID: "45678",
},
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/tmp/data01",
},
},
},
Status: api.PersistentVolumeStatus{
Phase: api.VolumeBound,
},
}
volumeIndex := NewPersistentVolumeOrderedIndex()
mockClient := &mockBinderClient{
claim: c1,
volume: v,
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
syncVolume(volumeIndex, mockClient, v)
if mockClient.volume.Status.Phase != api.VolumeReleased {
t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase)
}
}
func TestClaimSyncAfterVolumeProvisioning(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("claimbinder-test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
// Tests that binder.syncVolume will also syncClaim if the PV has completed
// provisioning but the claim is still Pending. We want to advance to Bound
// without having to wait until the binder's next sync period.
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: "bar",
Annotations: map[string]string{
qosProvisioningKey: "foo",
},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
},
},
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimPending,
},
}
claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
claimRef, _ := api.GetReference(claim)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
pvProvisioningRequiredAnnotationKey: pvProvisioningCompletedAnnotationValue,
},
},
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: fmt.Sprintf("%s/data01", tmpDir),
},
},
ClaimRef: claimRef,
},
Status: api.PersistentVolumeStatus{
Phase: api.VolumePending,
},
}
volumeIndex := NewPersistentVolumeOrderedIndex()
mockClient := &mockBinderClient{
claim: claim,
volume: pv,
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
// adds the volume to the index, making the volume available.
// pv also completed provisioning, so syncClaim should cause claim's phase to advance to Bound
syncVolume(volumeIndex, mockClient, pv)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
if mockClient.claim.Status.Phase != api.ClaimBound {
t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase)
}
}
func TestExampleObjects(t *testing.T) {
scenarios := map[string]struct {
expected interface{}
}{
"claims/claim-01.yaml": {
expected: &api.PersistentVolumeClaim{
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
},
},
},
},
},
"claims/claim-02.yaml": {
expected: &api.PersistentVolumeClaim{
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"),
},
},
},
},
},
"volumes/local-01.yaml": {
expected: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/somepath/data01",
},
},
},
},
},
"volumes/local-02.yaml": {
expected: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/somepath/data02",
},
},
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
},
},
},
}
for name, scenario := range scenarios {
codec := api.Codecs.UniversalDecoder()
o := core.NewObjects(api.Scheme, codec)
if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/"+name, o, codec); err != nil {
t.Fatal(err)
}
clientset := &fake.Clientset{}
clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolumeClaim{}) {
pvc, err := clientset.Core().PersistentVolumeClaims("ns").Get("doesntmatter")
if err != nil {
t.Fatalf("Error retrieving object: %v", err)
}
expected := scenario.expected.(*api.PersistentVolumeClaim)
if pvc.Spec.AccessModes[0] != expected.Spec.AccessModes[0] {
t.Errorf("Unexpected mismatch. Got %v wanted %v", pvc.Spec.AccessModes[0], expected.Spec.AccessModes[0])
}
aQty := pvc.Spec.Resources.Requests[api.ResourceStorage]
bQty := expected.Spec.Resources.Requests[api.ResourceStorage]
aSize := aQty.Value()
bSize := bQty.Value()
if aSize != bSize {
t.Errorf("Unexpected mismatch. Got %v wanted %v", aSize, bSize)
}
}
if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolume{}) {
pv, err := clientset.Core().PersistentVolumes().Get("doesntmatter")
if err != nil {
t.Fatalf("Error retrieving object: %v", err)
}
expected := scenario.expected.(*api.PersistentVolume)
if pv.Spec.AccessModes[0] != expected.Spec.AccessModes[0] {
t.Errorf("Unexpected mismatch. Got %v wanted %v", pv.Spec.AccessModes[0], expected.Spec.AccessModes[0])
}
aQty := pv.Spec.Capacity[api.ResourceStorage]
bQty := expected.Spec.Capacity[api.ResourceStorage]
aSize := aQty.Value()
bSize := bQty.Value()
if aSize != bSize {
t.Errorf("Unexpected mismatch. Got %v wanted %v", aSize, bSize)
}
if pv.Spec.HostPath.Path != expected.Spec.HostPath.Path {
t.Errorf("Unexpected mismatch. Got %v wanted %v", pv.Spec.HostPath.Path, expected.Spec.HostPath.Path)
}
}
}
}
func TestBindingWithExamples(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("claimbinder-test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
codec := api.Codecs.UniversalDecoder()
o := core.NewObjects(api.Scheme, codec)
if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil {
t.Fatal(err)
}
if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil {
t.Fatal(err)
}
clientset := &fake.Clientset{}
clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
pv, err := clientset.Core().PersistentVolumes().Get("any")
if err != nil {
t.Errorf("Unexpected error getting PV from client: %v", err)
}
pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
if err != nil {
t.Errorf("Unexpected error getting PV from client: %v", err)
}
pv.ObjectMeta.SelfLink = testapi.Default.SelfLink("pv", "")
// the default value of the PV is Pending. If processed at least once, its status in etcd is Available.
// There was a bug where only Pending volumes were being indexed and made ready for claims.
// Test that !Pending gets correctly added
pv.Status.Phase = api.VolumeAvailable
claim, err := clientset.Core().PersistentVolumeClaims("ns").Get("any")
if err != nil {
t.Errorf("Unexpected error getting PVC from client: %v", err)
}
claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
volumeIndex := NewPersistentVolumeOrderedIndex()
mockClient := &mockBinderClient{
volume: pv,
claim: claim,
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
recycler := &PersistentVolumeRecycler{
kubeClient: clientset,
client: mockClient,
pluginMgr: plugMgr,
}
// adds the volume to the index, making the volume available
syncVolume(volumeIndex, mockClient, pv)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
// add the claim to fake API server
mockClient.UpdatePersistentVolumeClaim(claim)
// an initial sync for a claim will bind it to an unbound volume
syncClaim(volumeIndex, mockClient, claim)
// bind expected on pv.Spec but status update hasn't happened yet
if mockClient.volume.Spec.ClaimRef == nil {
t.Errorf("Expected ClaimRef but got nil for pv.Status.ClaimRef\n")
}
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
if mockClient.claim.Spec.VolumeName != pv.Name {
t.Errorf("Expected claim.Spec.VolumeName %s but got %s", mockClient.claim.Spec.VolumeName, pv.Name)
}
if mockClient.claim.Status.Phase != api.ClaimBound {
t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase)
}
// state changes in pvc triggers sync that sets pv attributes to pvc.Status
syncClaim(volumeIndex, mockClient, claim)
if len(mockClient.claim.Status.AccessModes) == 0 {
t.Errorf("Expected %d access modes but got 0", len(pv.Spec.AccessModes))
}
// persisting the bind to pv.Spec.ClaimRef triggers a sync
syncVolume(volumeIndex, mockClient, mockClient.volume)
if mockClient.volume.Status.Phase != api.VolumeBound {
t.Errorf("Expected phase %s but got %s", api.VolumeBound, mockClient.volume.Status.Phase)
}
// pretend the user deleted their claim. periodic resync picks it up.
mockClient.claim = nil
syncVolume(volumeIndex, mockClient, mockClient.volume)
if mockClient.volume.Status.Phase != api.VolumeReleased {
t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase)
}
// released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing
err = recycler.reclaimVolume(mockClient.volume)
if err != nil {
t.Errorf("Unexpected error reclaiming volume: %+v", err)
}
if mockClient.volume.Status.Phase != api.VolumePending {
t.Errorf("Expected phase %s but got %s", api.VolumePending, mockClient.volume.Status.Phase)
}
// after the recycling changes the phase to Pending, the binder picks up again
// to remove any vestiges of binding and make the volume Available again
syncVolume(volumeIndex, mockClient, mockClient.volume)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
if mockClient.volume.Spec.ClaimRef != nil {
t.Errorf("Expected nil ClaimRef: %+v", mockClient.volume.Spec.ClaimRef)
}
}
func TestCasting(t *testing.T) {
clientset := fake.NewSimpleClientset()
binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)
pv := &api.PersistentVolume{}
unk := cache.DeletedFinalStateUnknown{}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: api.PersistentVolumeClaimStatus{Phase: api.ClaimBound},
}
// Inject mockClient into the binder. This prevents weird errors on stderr
// as the binder wants to load PV/PVC from API server.
mockClient := &mockBinderClient{
volume: pv,
claim: pvc,
}
binder.client = mockClient
// none of these should fail casting.
// the real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler
binder.addVolume(pv)
binder.updateVolume(pv, pv)
binder.deleteVolume(pv)
binder.deleteVolume(unk)
binder.addClaim(pvc)
binder.updateClaim(pvc, pvc)
}
func TestRecycledPersistentVolumeUID(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("claimbinder-test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
codec := api.Codecs.UniversalDecoder()
o := core.NewObjects(api.Scheme, codec)
if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil {
t.Fatal(err)
}
if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil {
t.Fatal(err)
}
clientset := &fake.Clientset{}
clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
pv, err := clientset.Core().PersistentVolumes().Get("any")
if err != nil {
t.Errorf("Unexpected error getting PV from client: %v", err)
}
pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
if err != nil {
t.Errorf("Unexpected error getting PV from client: %v", err)
}
pv.ObjectMeta.SelfLink = testapi.Default.SelfLink("pv", "")
// the default value of the PV is Pending. If processed at least once, its status in etcd is Available.
// There was a bug where only Pending volumes were being indexed and made ready for claims.
// Test that !Pending gets correctly added
pv.Status.Phase = api.VolumeAvailable
claim, err := clientset.Core().PersistentVolumeClaims("ns").Get("any")
if err != nil {
t.Errorf("Unexpected error getting PVC from client: %v", err)
}
claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
claim.ObjectMeta.UID = types.UID("uid1")
volumeIndex := NewPersistentVolumeOrderedIndex()
mockClient := &mockBinderClient{
volume: pv,
claim: claim,
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
recycler := &PersistentVolumeRecycler{
kubeClient: clientset,
client: mockClient,
pluginMgr: plugMgr,
}
// adds the volume to the index, making the volume available
syncVolume(volumeIndex, mockClient, pv)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
// add the claim to fake API server
mockClient.UpdatePersistentVolumeClaim(claim)
// an initial sync for a claim will bind it to an unbound volume
syncClaim(volumeIndex, mockClient, claim)
// pretend the user deleted their claim. periodic resync picks it up.
mockClient.claim = nil
syncVolume(volumeIndex, mockClient, mockClient.volume)
if mockClient.volume.Status.Phase != api.VolumeReleased {
t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase)
}
// released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing
err = recycler.reclaimVolume(mockClient.volume)
if err != nil {
t.Errorf("Unexpected error reclaiming volume: %+v", err)
}
if mockClient.volume.Status.Phase != api.VolumePending {
t.Errorf("Expected phase %s but got %s", api.VolumePending, mockClient.volume.Status.Phase)
}
// after the recycling changes the phase to Pending, the binder picks up again
// to remove any vestiges of binding and make the volume Available again
//
// explicitly set the claim's UID to a different value to ensure that a new claim with the same
// name as the one the PV was previously bound to still yields an available volume
claim.ObjectMeta.UID = types.UID("uid2")
mockClient.claim = claim
syncVolume(volumeIndex, mockClient, mockClient.volume)
if mockClient.volume.Status.Phase != api.VolumeAvailable {
t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
}
if mockClient.volume.Spec.ClaimRef != nil {
t.Errorf("Expected nil ClaimRef: %+v", mockClient.volume.Spec.ClaimRef)
}
}
type mockBinderClient struct {
volume *api.PersistentVolume
claim *api.PersistentVolumeClaim
}
func (c *mockBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
return c.volume, nil
}
func (c *mockBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
c.volume = volume
return c.volume, nil
}
func (c *mockBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
c.volume = nil
return nil
}
func (c *mockBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
c.volume = volume
return c.volume, nil
}
func (c *mockBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
if c.claim != nil {
return c.claim, nil
} else {
return nil, errors.NewNotFound(api.Resource("persistentvolumes"), name)
}
}
func (c *mockBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
c.claim = claim
return c.claim, nil
}
func (c *mockBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
c.claim = claim
return c.claim, nil
}
func newMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
return &mockRecycler{
path: spec.PersistentVolume.Spec.HostPath.Path,
}, nil
}
type mockRecycler struct {
path string
host volume.VolumeHost
volume.MetricsNil
}
func (r *mockRecycler) GetPath() string {
return r.path
}
func (r *mockRecycler) Recycle() error {
// return nil means recycle passed
return nil
}

View File

@ -1,536 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// PersistentVolumeProvisionerController reconciles the state of all PersistentVolumes and PersistentVolumeClaims.
type PersistentVolumeProvisionerController struct {
volumeController *framework.Controller
volumeStore cache.Store
claimController *framework.Controller
claimStore cache.Store
client controllerClient
cloud cloudprovider.Interface
provisioner volume.ProvisionableVolumePlugin
pluginMgr volume.VolumePluginMgr
stopChannels map[string]chan struct{}
mutex sync.RWMutex
clusterName string
}
// constant name values for the controller's stopChannels map.
// the controller uses these for graceful shutdown
const volumesStopChannel = "volumes"
const claimsStopChannel = "claims"
// NewPersistentVolumeProvisionerController creates a new PersistentVolumeProvisionerController
func NewPersistentVolumeProvisionerController(client controllerClient, syncPeriod time.Duration, clusterName string, plugins []volume.VolumePlugin, provisioner volume.ProvisionableVolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeProvisionerController, error) {
controller := &PersistentVolumeProvisionerController{
client: client,
cloud: cloud,
provisioner: provisioner,
clusterName: clusterName,
}
if err := controller.pluginMgr.InitPlugins(plugins, controller); err != nil {
return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolumeProvisionerController: %+v", err)
}
glog.V(5).Infof("Initializing provisioner: %s", controller.provisioner.Name())
controller.provisioner.Init(controller)
controller.volumeStore, controller.volumeController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.ListPersistentVolumes(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return client.WatchPersistentVolumes(options)
},
},
&api.PersistentVolume{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: controller.handleAddVolume,
UpdateFunc: controller.handleUpdateVolume,
// delete handler not needed in this controller.
// volume deletion is handled by the recycler controller
},
)
controller.claimStore, controller.claimController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.ListPersistentVolumeClaims(api.NamespaceAll, options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return client.WatchPersistentVolumeClaims(api.NamespaceAll, options)
},
},
&api.PersistentVolumeClaim{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: controller.handleAddClaim,
UpdateFunc: controller.handleUpdateClaim,
// delete handler not needed.
// normal recycling applies when a claim is deleted.
// recycling is handled by the binding controller.
},
)
return controller, nil
}
func (controller *PersistentVolumeProvisionerController) handleAddVolume(obj interface{}) {
controller.mutex.Lock()
defer controller.mutex.Unlock()
cachedPv, _, _ := controller.volumeStore.Get(obj)
if pv, ok := cachedPv.(*api.PersistentVolume); ok {
err := controller.reconcileVolume(pv)
if err != nil {
glog.Errorf("Error reconciling volume %s: %+v", pv.Name, err)
}
}
}
func (controller *PersistentVolumeProvisionerController) handleUpdateVolume(oldObj, newObj interface{}) {
// The flow for Update is the same as Add.
// A volume is only provisioned if not done so already.
controller.handleAddVolume(newObj)
}
func (controller *PersistentVolumeProvisionerController) handleAddClaim(obj interface{}) {
controller.mutex.Lock()
defer controller.mutex.Unlock()
cachedPvc, exists, _ := controller.claimStore.Get(obj)
if !exists {
glog.Errorf("PersistentVolumeClaim does not exist in the local cache: %+v", obj)
return
}
if pvc, ok := cachedPvc.(*api.PersistentVolumeClaim); ok {
err := controller.reconcileClaim(pvc)
if err != nil {
glog.Errorf("Error encoutered reconciling claim %s: %+v", pvc.Name, err)
}
}
}
func (controller *PersistentVolumeProvisionerController) handleUpdateClaim(oldObj, newObj interface{}) {
// The flow for Update is the same as Add.
// A volume is only provisioned for a claim if not done so already.
controller.handleAddClaim(newObj)
}
func (controller *PersistentVolumeProvisionerController) reconcileClaim(claim *api.PersistentVolumeClaim) error {
glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s] for dynamic provisioning", claim.Name)
// The claim may have been modified by a parallel call to reconcileClaim; load
// the current version.
newClaim, err := controller.client.GetPersistentVolumeClaim(claim.Namespace, claim.Name)
if err != nil {
return fmt.Errorf("Cannot reload claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
claim = newClaim
err = controller.claimStore.Update(claim)
if err != nil {
return fmt.Errorf("Cannot update claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
if controller.provisioner == nil {
return fmt.Errorf("No provisioner configured for controller")
}
// no provisioning requested, return Pending. Claim may be pending indefinitely without a match.
if !keyExists(qosProvisioningKey, claim.Annotations) {
glog.V(5).Infof("PersistentVolumeClaim[%s] no provisioning required", claim.Name)
return nil
}
if len(claim.Spec.VolumeName) != 0 {
glog.V(5).Infof("PersistentVolumeClaim[%s] already bound. No provisioning required", claim.Name)
return nil
}
if isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, claim.Annotations) {
glog.V(5).Infof("PersistentVolumeClaim[%s] is already provisioned.", claim.Name)
return nil
}
glog.V(5).Infof("PersistentVolumeClaim[%s] provisioning", claim.Name)
provisioner, err := controller.newProvisioner(controller.provisioner, claim, nil)
if err != nil {
return fmt.Errorf("Unexpected error getting new provisioner for claim %s: %v\n", claim.Name, err)
}
newVolume, err := provisioner.NewPersistentVolumeTemplate()
if err != nil {
return fmt.Errorf("Unexpected error getting new volume template for claim %s: %v\n", claim.Name, err)
}
claimRef, err := api.GetReference(claim)
if err != nil {
return fmt.Errorf("Unexpected error getting claim reference for %s: %v\n", claim.Name, err)
}
storageClass := claim.Annotations[qosProvisioningKey]
// creating this volume effectively binds it to the claim.
// The claim will match the volume during the next sync period, once the volume is in the local cache.
newVolume.Spec.ClaimRef = claimRef
newVolume.Annotations[pvProvisioningRequiredAnnotationKey] = "true"
newVolume.Annotations[qosProvisioningKey] = storageClass
newVolume, err = controller.client.CreatePersistentVolume(newVolume)
if err != nil {
return fmt.Errorf("PersistentVolumeClaim[%s] failed provisioning: %+v", claim.Name, err)
}
glog.V(5).Infof("Unprovisioned PersistentVolume[%s] created for PVC[%s], which will be fulfilled in the storage provider", newVolume.Name, claim.Name)
claim.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
_, err = controller.client.UpdatePersistentVolumeClaim(claim)
if err != nil {
glog.Errorf("error updating persistent volume claim: %v", err)
}
return nil
}
func (controller *PersistentVolumeProvisionerController) reconcileVolume(pv *api.PersistentVolume) error {
glog.V(5).Infof("PersistentVolume[%s] reconciling", pv.Name)
// The PV may have been modified by a parallel call to reconcileVolume; load
// the current version.
newPv, err := controller.client.GetPersistentVolume(pv.Name)
if err != nil {
return fmt.Errorf("Cannot reload volume %s: %v", pv.Name, err)
}
pv = newPv
if pv.Spec.ClaimRef == nil {
glog.V(5).Infof("PersistentVolume[%s] is not bound to a claim. No provisioning required", pv.Name)
return nil
}
// TODO: fix this leaky abstraction. Had to make our own store key because ClaimRef fails the default keyfunc (no Meta on object).
obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name))
if !exists {
return fmt.Errorf("PersistentVolumeClaim[%s/%s] not found in local cache", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
}
claim, ok := obj.(*api.PersistentVolumeClaim)
if !ok {
return fmt.Errorf("PersistentVolumeClaim expected, but got %v", obj)
}
// no provisioning required, volume is ready and Bound
if !keyExists(pvProvisioningRequiredAnnotationKey, pv.Annotations) {
glog.V(5).Infof("PersistentVolume[%s] does not require provisioning", pv.Name)
return nil
}
// provisioning is completed, volume is ready.
if isProvisioningComplete(pv) {
glog.V(5).Infof("PersistentVolume[%s] is bound and provisioning is complete", pv.Name)
if pv.Spec.ClaimRef.Namespace != claim.Namespace || pv.Spec.ClaimRef.Name != claim.Name {
return fmt.Errorf("pre-bind mismatch - expected %s but found %s/%s", claimToClaimKey(claim), pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
}
return nil
}
// provisioning is incomplete. Attempt to provision the volume.
glog.V(5).Infof("PersistentVolume[%s] provisioning in progress", pv.Name)
err = provisionVolume(pv, controller)
if err != nil {
return fmt.Errorf("Error provisioning PersistentVolume[%s]: %v", pv.Name, err)
}
return nil
}
// provisionVolume provisions a volume that has been created in the cluster but not yet fulfilled by
// the storage provider.
func provisionVolume(pv *api.PersistentVolume, controller *PersistentVolumeProvisionerController) error {
if isProvisioningComplete(pv) {
return fmt.Errorf("PersistentVolume[%s] is already provisioned", pv.Name)
}
if _, exists := pv.Annotations[qosProvisioningKey]; !exists {
return fmt.Errorf("PersistentVolume[%s] does not contain a provisioning request. Provisioning not required.", pv.Name)
}
if controller.provisioner == nil {
return fmt.Errorf("No provisioner found for volume: %s", pv.Name)
}
// Find the claim in local cache
obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name))
if !exists {
return fmt.Errorf("Could not find PersistentVolumeClaim[%s/%s] in local cache", pv.Spec.ClaimRef.Name, pv.Name)
}
claim := obj.(*api.PersistentVolumeClaim)
provisioner, err := controller.newProvisioner(controller.provisioner, claim, pv)
if err != nil {
return fmt.Errorf("Could not create provisioner for PersistentVolume[%s]: %v", pv.Name, err)
}
err = provisioner.Provision(pv)
if err != nil {
glog.Errorf("Could not provision %s", pv.Name)
pv.Status.Phase = api.VolumeFailed
pv.Status.Message = err.Error()
if pv, apiErr := controller.client.UpdatePersistentVolumeStatus(pv); apiErr != nil {
return fmt.Errorf("PersistentVolume[%s] failed provisioning and also failed status update: %v - %v", pv.Name, err, apiErr)
}
return fmt.Errorf("PersistentVolume[%s] failed provisioning: %v", pv.Name, err)
}
clone, err := conversion.NewCloner().DeepCopy(pv)
if err != nil {
return fmt.Errorf("Error cloning PersistentVolume[%s]: %v", pv.Name, err)
}
volumeClone, ok := clone.(*api.PersistentVolume)
if !ok {
return fmt.Errorf("Unexpected pv cast error: %v", volumeClone)
}
volumeClone.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
pv, err = controller.client.UpdatePersistentVolume(volumeClone)
if err != nil {
// TODO: https://github.com/kubernetes/kubernetes/issues/14443
// the volume was created in the infrastructure and likely has a PV name on it,
// but we failed to save the annotation that marks the volume as provisioned.
return fmt.Errorf("Error updating PersistentVolume[%s] with provisioning completed annotation. There is a potential for dupes and orphans.", volumeClone.Name)
}
return nil
}
// Run starts all of this controller's control loops
func (controller *PersistentVolumeProvisionerController) Run() {
glog.V(5).Infof("Starting PersistentVolumeProvisionerController\n")
if controller.stopChannels == nil {
controller.stopChannels = make(map[string]chan struct{})
}
if _, exists := controller.stopChannels[volumesStopChannel]; !exists {
controller.stopChannels[volumesStopChannel] = make(chan struct{})
go controller.volumeController.Run(controller.stopChannels[volumesStopChannel])
}
if _, exists := controller.stopChannels[claimsStopChannel]; !exists {
controller.stopChannels[claimsStopChannel] = make(chan struct{})
go controller.claimController.Run(controller.stopChannels[claimsStopChannel])
}
}
// Stop gracefully shuts down this controller
func (controller *PersistentVolumeProvisionerController) Stop() {
glog.V(5).Infof("Stopping PersistentVolumeProvisionerController\n")
for name, stopChan := range controller.stopChannels {
close(stopChan)
delete(controller.stopChannels, name)
}
}
func (controller *PersistentVolumeProvisionerController) newProvisioner(plugin volume.ProvisionableVolumePlugin, claim *api.PersistentVolumeClaim, pv *api.PersistentVolume) (volume.Provisioner, error) {
tags := make(map[string]string)
tags[cloudVolumeCreatedForClaimNamespaceTag] = claim.Namespace
tags[cloudVolumeCreatedForClaimNameTag] = claim.Name
// pv can be nil when the provisioner has not created the PV yet
if pv != nil {
tags[cloudVolumeCreatedForVolumeNameTag] = pv.Name
}
volumeOptions := volume.VolumeOptions{
Capacity: claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
AccessModes: claim.Spec.AccessModes,
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
CloudTags: &tags,
ClusterName: controller.clusterName,
}
if pv != nil {
volumeOptions.PVName = pv.Name
}
provisioner, err := plugin.NewProvisioner(volumeOptions)
return provisioner, err
}
// controllerClient abstracts access to PVs and PVCs. Easy to mock for testing and wrap for real client.
type controllerClient interface {
CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error)
ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error)
WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error)
GetPersistentVolume(name string) (*api.PersistentVolume, error)
UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
DeletePersistentVolume(volume *api.PersistentVolume) error
UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error)
ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error)
WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error)
UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
// provided to give VolumeHost and plugins access to the kube client
GetKubeClient() clientset.Interface
}
func NewControllerClient(c clientset.Interface) controllerClient {
return &realControllerClient{c}
}
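// A minimal usage sketch (hypothetical wiring; myClientset stands for any
// clientset.Interface and is not defined in this file):
//
//	client := NewControllerClient(myClientset)
//	pv, err := client.GetPersistentVolume("pv01")
//	if err != nil {
//		// handle lookup failure
//	}
//	_ = pv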
var _ controllerClient = &realControllerClient{}
type realControllerClient struct {
client clientset.Interface
}
func (c *realControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Get(name)
}
func (c *realControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) {
return c.client.Core().PersistentVolumes().List(options)
}
func (c *realControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) {
return c.client.Core().PersistentVolumes().Watch(options)
}
func (c *realControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Create(pv)
}
func (c *realControllerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Update(volume)
}
func (c *realControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
}
func (c *realControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().UpdateStatus(volume)
}
func (c *realControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(namespace).Get(name)
}
func (c *realControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) {
return c.client.Core().PersistentVolumeClaims(namespace).List(options)
}
func (c *realControllerClient) WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) {
return c.client.Core().PersistentVolumeClaims(namespace).Watch(options)
}
func (c *realControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim)
}
func (c *realControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
}
func (c *realControllerClient) GetKubeClient() clientset.Interface {
return c.client
}
func keyExists(key string, haystack map[string]string) bool {
_, exists := haystack[key]
return exists
}
func isProvisioningComplete(pv *api.PersistentVolume) bool {
return isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, pv.Annotations)
}
func isAnnotationMatch(key, needle string, haystack map[string]string) bool {
value, exists := haystack[key]
if !exists {
return false
}
return value == needle
}
func isRecyclable(policy api.PersistentVolumeReclaimPolicy) bool {
return policy == api.PersistentVolumeReclaimDelete || policy == api.PersistentVolumeReclaimRecycle
}
// VolumeHost implementation
// PersistentVolumeProvisionerController is host to the volume plugins, but does not actually mount any volumes.
// Because no mounting is performed, most of the VolumeHost methods are not implemented.
func (c *PersistentVolumeProvisionerController) GetPluginDir(podUID string) string {
return ""
}
func (c *PersistentVolumeProvisionerController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return ""
}
func (c *PersistentVolumeProvisionerController) GetPodPluginDir(podUID types.UID, pluginName string) string {
return ""
}
func (c *PersistentVolumeProvisionerController) GetKubeClient() clientset.Interface {
return c.client.GetKubeClient()
}
func (c *PersistentVolumeProvisionerController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation")
}
func (c *PersistentVolumeProvisionerController) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation")
}
func (c *PersistentVolumeProvisionerController) GetCloudProvider() cloudprovider.Interface {
return c.cloud
}
func (c *PersistentVolumeProvisionerController) GetMounter() mount.Interface {
return nil
}
func (c *PersistentVolumeProvisionerController) GetWriter() io.Writer {
return nil
}
func (c *PersistentVolumeProvisionerController) GetHostName() string {
return ""
}
const (
// this pair of constants is used by the provisioner.
// The key is a kube namespaced key that denotes a volume requires provisioning.
// The value is set only when provisioning is completed. Any other value will tell the provisioner
// that provisioning has not yet occurred.
pvProvisioningRequiredAnnotationKey = "volume.experimental.kubernetes.io/provisioning-required"
pvProvisioningCompletedAnnotationValue = "volume.experimental.kubernetes.io/provisioning-completed"
)
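// An illustrative lifecycle sketch for this annotation pair (the names are
// the constants and helpers defined in this file; the flow is inferred from
// reconcileClaim and provisionVolume above):
//
//	pv.Annotations[pvProvisioningRequiredAnnotationKey] = "true" // set when the PV template is created
//	// ... the provisioner fulfills the volume in the storage provider ...
//	pv.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
//	isProvisioningComplete(pv) // now returns true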

View File

@ -1,295 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/util"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/watch"
)
func TestProvisionerRunStop(t *testing.T) {
controller, _, _ := makeTestController()
if len(controller.stopChannels) != 0 {
t.Errorf("Non-running provisioner should not have any stopChannels. Got %v", len(controller.stopChannels))
}
controller.Run()
if len(controller.stopChannels) != 2 {
t.Errorf("Running provisioner should have exactly 2 stopChannels. Got %v", len(controller.stopChannels))
}
controller.Stop()
if len(controller.stopChannels) != 0 {
t.Errorf("Non-running provisioner should not have any stopChannels. Got %v", len(controller.stopChannels))
}
}
func makeTestVolume() *api.PersistentVolume {
return &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{},
Name: "pv01",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/somepath/data01",
},
},
},
}
}
func makeTestClaim() *api.PersistentVolumeClaim {
return &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{},
Name: "claim01",
Namespace: "ns",
SelfLink: testapi.Default.SelfLink("pvc", ""),
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("8G"),
},
},
},
}
}
func makeTestController() (*PersistentVolumeProvisionerController, *mockControllerClient, *volumetest.FakeVolumePlugin) {
mockClient := &mockControllerClient{}
mockVolumePlugin := &volumetest.FakeVolumePlugin{}
controller, _ := NewPersistentVolumeProvisionerController(mockClient, 1*time.Second, "fake-kubernetes", nil, mockVolumePlugin, &fake_cloud.FakeCloud{})
return controller, mockClient, mockVolumePlugin
}
func TestReconcileClaim(t *testing.T) {
controller, mockClient, _ := makeTestController()
pvc := makeTestClaim()
// watch would have added the claim to the store
controller.claimStore.Add(pvc)
// store it in fake API server
mockClient.UpdatePersistentVolumeClaim(pvc)
err := controller.reconcileClaim(pvc)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
// non-provisionable PVC should not have created a volume on reconciliation
if mockClient.volume != nil {
t.Error("Unexpected volume found in mock client. Expected nil")
}
pvc.Annotations[qosProvisioningKey] = "foo"
// store it in fake API server
mockClient.UpdatePersistentVolumeClaim(pvc)
err = controller.reconcileClaim(pvc)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
// PVC requesting provisioning should have a PV created for it
if mockClient.volume == nil {
t.Error("Expected to find bound volume but got nil")
}
if mockClient.volume.Spec.ClaimRef.Name != pvc.Name {
t.Errorf("Expected PV to be bound to %s but got %s", mockClient.volume.Spec.ClaimRef.Name, pvc.Name)
}
// the PVC should have correct annotation
if mockClient.claim.Annotations[pvProvisioningRequiredAnnotationKey] != pvProvisioningCompletedAnnotationValue {
t.Errorf("Annotation %q not set", pvProvisioningRequiredAnnotationKey)
}
// Run reconcileClaim a 2nd time to simulate a periodic sweep running in
// parallel to the previous reconcileClaim. There is a lock in
// handleAddClaim(), so the handlers are called sequentially, but the second
// call will have an old version of the claim.
oldPVName := mockClient.volume.Name
// Make the "old" claim
pvc2 := makeTestClaim()
pvc2.Annotations[qosProvisioningKey] = "foo"
// Add a dummy annotation so we recognize the claim was updated (i.e.
// stored in mockClient)
pvc2.Annotations["test"] = "test"
err = controller.reconcileClaim(pvc2)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
// The 2nd PVC should be ignored, no new PV was created
if val, found := pvc2.Annotations[pvProvisioningRequiredAnnotationKey]; found {
t.Errorf("2nd PVC got unexpected annotation %q: %q", pvProvisioningRequiredAnnotationKey, val)
}
if mockClient.volume.Name != oldPVName {
t.Errorf("2nd PVC unexpectedly provisioned a new volume")
}
if _, found := mockClient.claim.Annotations["test"]; found {
t.Errorf("2nd PVC was unexpectedly updated")
}
}
func checkTagValue(t *testing.T, tags map[string]string, tag string, expectedValue string) {
value, found := tags[tag]
if !found || value != expectedValue {
t.Errorf("Expected tag value %s = %s but value %s found", tag, expectedValue, value)
}
}
func TestReconcileVolume(t *testing.T) {
controller, mockClient, mockVolumePlugin := makeTestController()
pv := makeTestVolume()
pvc := makeTestClaim()
mockClient.volume = pv
err := controller.reconcileVolume(pv)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
// watch adds claim to the store.
// we need to add it to our mock client to mimic normal Get call
controller.claimStore.Add(pvc)
mockClient.claim = pvc
// pretend the claim and volume are bound, no provisioning required
claimRef, _ := api.GetReference(pvc)
pv.Spec.ClaimRef = claimRef
mockClient.volume = pv
err = controller.reconcileVolume(pv)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
pv.Annotations[pvProvisioningRequiredAnnotationKey] = "!pvProvisioningCompleted"
pv.Annotations[qosProvisioningKey] = "foo"
mockClient.volume = pv
err = controller.reconcileVolume(pv)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
if !isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations) {
t.Errorf("Expected %s but got %s", pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations[pvProvisioningRequiredAnnotationKey])
}
// Check that the volume plugin was called with correct tags
tags := *mockVolumePlugin.LastProvisionerOptions.CloudTags
checkTagValue(t, tags, cloudVolumeCreatedForClaimNamespaceTag, pvc.Namespace)
checkTagValue(t, tags, cloudVolumeCreatedForClaimNameTag, pvc.Name)
checkTagValue(t, tags, cloudVolumeCreatedForVolumeNameTag, pv.Name)
}
var _ controllerClient = &mockControllerClient{}
type mockControllerClient struct {
volume *api.PersistentVolume
claim *api.PersistentVolumeClaim
}
func (c *mockControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
return c.volume, nil
}
func (c *mockControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
if pv.GenerateName != "" && pv.Name == "" {
pv.Name = fmt.Sprintf("%s%s", pv.GenerateName, util.NewUUID())
}
c.volume = pv
return c.volume, nil
}
func (c *mockControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) {
return &api.PersistentVolumeList{
Items: []api.PersistentVolume{*c.volume},
}, nil
}
func (c *mockControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
func (c *mockControllerClient) UpdatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.CreatePersistentVolume(pv)
}
func (c *mockControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
c.volume = nil
return nil
}
func (c *mockControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return volume, nil
}
func (c *mockControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
if c.claim != nil {
return c.claim, nil
} else {
return nil, errors.NewNotFound(api.Resource("persistentvolumes"), name)
}
}
func (c *mockControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) {
return &api.PersistentVolumeClaimList{
Items: []api.PersistentVolumeClaim{*c.claim},
}, nil
}
func (c *mockControllerClient) WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
func (c *mockControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
c.claim = claim
return c.claim, nil
}
func (c *mockControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
return claim, nil
}
func (c *mockControllerClient) GetKubeClient() clientset.Interface {
return nil
}

View File

@ -1,415 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
)
var _ volume.VolumeHost = &PersistentVolumeRecycler{}
// PersistentVolumeRecycler is a controller that watches for PersistentVolumes that are released from their claims.
// This controller will Recycle those volumes whose reclaim policy is set to PersistentVolumeReclaimRecycle and make them
// available again for a new claim.
type PersistentVolumeRecycler struct {
volumeController *framework.Controller
stopChannel chan struct{}
client recyclerClient
kubeClient clientset.Interface
pluginMgr volume.VolumePluginMgr
cloud cloudprovider.Interface
maximumRetry int
syncPeriod time.Duration
// Local cache of failed recycle / delete operations. Map volume.Name -> status of the volume.
// Only PVs in Released state have an entry here.
releasedVolumes map[string]releasedVolumeStatus
}
// releasedVolumeStatus holds state of failed delete/recycle operation on a
// volume. The controller re-tries the operation several times and it stores
// retry count + timestamp of the last attempt here.
type releasedVolumeStatus struct {
// How many recycle/delete operations failed.
retryCount int
// Timestamp of the last attempt.
lastAttempt time.Time
}
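// To illustrate the bookkeeping above (a worked example, with an assumed
// syncPeriod of 30s): a volume whose delete/recycle fails at time t gets
// lastAttempt = t, and shouldRecycle() below skips it until t+30s, since
// the next retry is computed as status.lastAttempt.Add(recycler.syncPeriod).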
// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler
func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, maximumRetry int, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
recyclerClient := NewRecyclerClient(kubeClient)
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("pv_recycler_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
recycler := &PersistentVolumeRecycler{
client: recyclerClient,
kubeClient: kubeClient,
cloud: cloud,
maximumRetry: maximumRetry,
syncPeriod: syncPeriod,
releasedVolumes: make(map[string]releasedVolumeStatus),
}
if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err)
}
_, volumeController := framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().PersistentVolumes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Core().PersistentVolumes().Watch(options)
},
},
&api.PersistentVolume{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pv, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Error casting object to PersistentVolume: %v", obj)
return
}
recycler.reclaimVolume(pv)
},
UpdateFunc: func(oldObj, newObj interface{}) {
pv, ok := newObj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Error casting object to PersistentVolume: %v", newObj)
return
}
recycler.reclaimVolume(pv)
},
DeleteFunc: func(obj interface{}) {
pv, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Errorf("Error casting object to PersistentVolume: %v", obj)
return
}
recycler.reclaimVolume(pv)
recycler.removeReleasedVolume(pv)
},
},
)
recycler.volumeController = volumeController
return recycler, nil
}
// shouldRecycle checks a volume and returns nil if the volume should be
// recycled right now. Otherwise it returns an error with the reason why it
// should not be recycled.
func (recycler *PersistentVolumeRecycler) shouldRecycle(pv *api.PersistentVolume) error {
if pv.Spec.ClaimRef == nil {
return fmt.Errorf("Volume does not have a reference to claim")
}
if pv.Status.Phase != api.VolumeReleased {
return fmt.Errorf("The volume is not in 'Released' phase")
}
// The volume is Released, should we retry recycling?
status, found := recycler.releasedVolumes[pv.Name]
if !found {
// We don't know anything about this volume. The controller has been
// restarted or the volume has been marked as Released by another
// controller. Recycle/delete this volume as if it was just Released.
glog.V(5).Infof("PersistentVolume[%s] not found in local cache, recycling", pv.Name)
return nil
}
// Check the timestamp
expectedRetry := status.lastAttempt.Add(recycler.syncPeriod)
if time.Now().After(expectedRetry) {
glog.V(5).Infof("PersistentVolume[%s] retrying recycle after timeout", pv.Name)
return nil
}
// It's too early
glog.V(5).Infof("PersistentVolume[%s] skipping recycle, it's too early: now: %v, next retry: %v", pv.Name, time.Now(), expectedRetry)
return fmt.Errorf("Too early after previous failure")
}
func (recycler *PersistentVolumeRecycler) reclaimVolume(pv *api.PersistentVolume) error {
glog.V(5).Infof("Recycler: checking PersistentVolume[%s]\n", pv.Name)
// Always load the latest version of the volume
newPV, err := recycler.client.GetPersistentVolume(pv.Name)
if err != nil {
return fmt.Errorf("Could not find PersistentVolume %s", pv.Name)
}
pv = newPV
err = recycler.shouldRecycle(pv)
if err == nil {
glog.V(5).Infof("Reclaiming PersistentVolume[%s]\n", pv.Name)
// both handleRecycle and handleDelete block until completion
// TODO: allow parallel recycling operations to increase throughput
switch pv.Spec.PersistentVolumeReclaimPolicy {
case api.PersistentVolumeReclaimRecycle:
err = recycler.handleRecycle(pv)
case api.PersistentVolumeReclaimDelete:
err = recycler.handleDelete(pv)
case api.PersistentVolumeReclaimRetain:
glog.V(5).Infof("Volume %s is set to retain after release. Skipping.\n", pv.Name)
default:
err = fmt.Errorf("No PersistentVolumeReclaimPolicy defined for spec: %+v", pv)
}
if err != nil {
glog.Errorf("Could not recycle volume spec: %+v", err)
return fmt.Errorf("Could not recycle volume spec: %+v", err)
}
return nil
}
glog.V(3).Infof("PersistentVolume[%s] phase %s - skipping: %v", pv.Name, pv.Status.Phase, err)
return nil
}
// handleReleaseFailure evaluates a failed Recycle/Delete operation, updating
// internal controller state with the new number of attempts and the timestamp
// of the last attempt. Based on the number of failures it returns the next
// state of the volume (Released / Failed).
func (recycler *PersistentVolumeRecycler) handleReleaseFailure(pv *api.PersistentVolume) api.PersistentVolumePhase {
status, found := recycler.releasedVolumes[pv.Name]
if !found {
// First failure, set retryCount to 0 (will be incremented a few lines below)
status = releasedVolumeStatus{}
}
status.retryCount += 1
if status.retryCount > recycler.maximumRetry {
// This was the last attempt. Remove any internal state and mark the
// volume as Failed.
glog.V(3).Infof("PersistentVolume[%s] failed %d times - marking Failed", pv.Name, status.retryCount)
recycler.removeReleasedVolume(pv)
return api.VolumeFailed
}
status.lastAttempt = time.Now()
recycler.releasedVolumes[pv.Name] = status
return api.VolumeReleased
}
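// A worked example of the retry budget above: with maximumRetry = 3, failed
// attempts 1-3 leave the volume Released (retryCount 1, 2, 3) and eligible
// for another retry after syncPeriod; the fourth failure pushes retryCount
// past the maximum, so the volume is marked Failed and its entry is removed
// from the cache.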
func (recycler *PersistentVolumeRecycler) removeReleasedVolume(pv *api.PersistentVolume) {
delete(recycler.releasedVolumes, pv.Name)
}
func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume) error {
glog.V(5).Infof("Recycling PersistentVolume[%s]\n", pv.Name)
currentPhase := pv.Status.Phase
nextPhase := currentPhase
spec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
nextPhase = api.VolumeFailed
pv.Status.Message = fmt.Sprintf("%v", err)
}
// an error above means a suitable plugin for this volume was not found.
// we don't need to attempt recycling when plugin is nil, but we do need to persist the next/failed phase
// of the volume so that subsequent syncs won't attempt recycling through this handler func.
if plugin != nil {
volRecycler, err := plugin.NewRecycler(spec)
if err != nil {
return fmt.Errorf("Could not obtain Recycler for spec: %#v error: %v", spec, err)
}
// blocks until completion
if err := volRecycler.Recycle(); err != nil {
glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err)
pv.Status.Message = fmt.Sprintf("Recycling error: %s", err)
nextPhase = recycler.handleReleaseFailure(pv)
} else {
glog.V(5).Infof("PersistentVolume[%s] successfully recycled\n", pv.Name)
// The volume has been recycled. Remove any internal state to make
// any subsequent bind+recycle cycle work.
recycler.removeReleasedVolume(pv)
nextPhase = api.VolumePending
}
}
if currentPhase != nextPhase {
glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase)
pv.Status.Phase = nextPhase
_, err := recycler.client.UpdatePersistentVolumeStatus(pv)
if err != nil {
// Rollback to previous phase
pv.Status.Phase = currentPhase
}
}
return nil
}
func (recycler *PersistentVolumeRecycler) handleDelete(pv *api.PersistentVolume) error {
glog.V(5).Infof("Deleting PersistentVolume[%s]\n", pv.Name)
currentPhase := pv.Status.Phase
nextPhase := currentPhase
spec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := recycler.pluginMgr.FindDeletablePluginBySpec(spec)
if err != nil {
nextPhase = api.VolumeFailed
pv.Status.Message = fmt.Sprintf("%v", err)
}
// an error above means a suitable plugin for this volume was not found.
// we don't need to attempt deleting when plugin is nil, but we do need to persist the next/failed phase
// of the volume so that subsequent syncs won't attempt deletion through this handler func.
if plugin != nil {
deleter, err := plugin.NewDeleter(spec)
if err != nil {
return fmt.Errorf("Could not obtain Deleter for spec: %#v error: %v", spec, err)
}
// blocks until completion
err = deleter.Delete()
if err != nil {
glog.Errorf("PersistentVolume[%s] failed deletion: %+v", pv.Name, err)
pv.Status.Message = fmt.Sprintf("Deletion error: %s", err)
nextPhase = recycler.handleReleaseFailure(pv)
} else {
glog.V(5).Infof("PersistentVolume[%s] successfully deleted through plugin\n", pv.Name)
recycler.removeReleasedVolume(pv)
// after successful deletion through the plugin, we can also remove the PV from the cluster
if err := recycler.client.DeletePersistentVolume(pv); err != nil {
return fmt.Errorf("error deleting persistent volume: %+v", err)
}
}
}
if currentPhase != nextPhase {
glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase)
pv.Status.Phase = nextPhase
_, err := recycler.client.UpdatePersistentVolumeStatus(pv)
if err != nil {
// Rollback to previous phase
pv.Status.Phase = currentPhase
}
}
return nil
}
// Run starts this recycler's control loops
func (recycler *PersistentVolumeRecycler) Run() {
glog.V(5).Infof("Starting PersistentVolumeRecycler\n")
if recycler.stopChannel == nil {
recycler.stopChannel = make(chan struct{})
go recycler.volumeController.Run(recycler.stopChannel)
}
}
// Stop gracefully shuts down this recycler
func (recycler *PersistentVolumeRecycler) Stop() {
glog.V(5).Infof("Stopping PersistentVolumeRecycler\n")
if recycler.stopChannel != nil {
close(recycler.stopChannel)
recycler.stopChannel = nil
}
}
// recyclerClient abstracts access to PVs
type recyclerClient interface {
GetPersistentVolume(name string) (*api.PersistentVolume, error)
UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
DeletePersistentVolume(volume *api.PersistentVolume) error
UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
}
func NewRecyclerClient(c clientset.Interface) recyclerClient {
return &realRecyclerClient{c}
}
type realRecyclerClient struct {
client clientset.Interface
}
func (c *realRecyclerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Get(name)
}
func (c *realRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().Update(volume)
}
func (c *realRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
}
func (c *realRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.Core().PersistentVolumes().UpdateStatus(volume)
}
// PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes.
// Because no mounting is performed, most of the VolumeHost methods are not implemented.
func (f *PersistentVolumeRecycler) GetPluginDir(podUID string) string {
return ""
}
func (f *PersistentVolumeRecycler) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return ""
}
func (f *PersistentVolumeRecycler) GetPodPluginDir(podUID types.UID, pluginName string) string {
return ""
}
func (f *PersistentVolumeRecycler) GetKubeClient() clientset.Interface {
return f.kubeClient
}
func (f *PersistentVolumeRecycler) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation")
}
func (f *PersistentVolumeRecycler) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation")
}
func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface {
return f.cloud
}
func (f *PersistentVolumeRecycler) GetMounter() mount.Interface {
return nil
}
func (f *PersistentVolumeRecycler) GetWriter() ioutil.Writer {
return nil
}
func (f *PersistentVolumeRecycler) GetHostName() string {
return ""
}

View File

@ -1,265 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/host_path"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
mySyncPeriod = 2 * time.Second
myMaximumRetry = 3
)
func TestFailedRecycling(t *testing.T) {
pv := preparePV()
mockClient := &mockBinderClient{
volume: pv,
}
// no Init called for pluginMgr and no plugins are available. Volume should fail recycling.
plugMgr := volume.VolumePluginMgr{}
recycler := &PersistentVolumeRecycler{
kubeClient: fake.NewSimpleClientset(),
client: mockClient,
pluginMgr: plugMgr,
releasedVolumes: make(map[string]releasedVolumeStatus),
}
err := recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Unexpected non-nil error: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumeFailed {
t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
}
// Use a new volume for the next test
pv = preparePV()
mockClient.volume = pv
pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
err = recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Unexpected non-nil error: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumeFailed {
t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
}
}
func TestRecyclingRetry(t *testing.T) {
// Test that recycler controller retries to recycle a volume several times, which succeeds eventually
pv := preparePV()
mockClient := &mockBinderClient{
volume: pv,
}
plugMgr := volume.VolumePluginMgr{}
// Use a fake NewRecycler function
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newFailingMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
// Reset a global call counter
failedCallCount = 0
recycler := &PersistentVolumeRecycler{
kubeClient: fake.NewSimpleClientset(),
client: mockClient,
pluginMgr: plugMgr,
syncPeriod: mySyncPeriod,
maximumRetry: myMaximumRetry,
releasedVolumes: make(map[string]releasedVolumeStatus),
}
// All but the last attempt will fail
testRecycleFailures(t, recycler, mockClient, pv, myMaximumRetry-1)
// The last attempt should succeed
err := recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Last step: Recycler failed: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumePending {
t.Errorf("Last step: The volume should be Pending, but is %s instead", mockClient.volume.Status.Phase)
}
// Check the cache, it should not have any entry
status, found := recycler.releasedVolumes[pv.Name]
if found {
t.Errorf("Last step: Expected PV to be removed from cache, got %v", status)
}
}
func TestRecyclingRetryAlwaysFail(t *testing.T) {
// Test that recycler controller retries to recycle a volume several times, which always fails.
pv := preparePV()
mockClient := &mockBinderClient{
volume: pv,
}
plugMgr := volume.VolumePluginMgr{}
// Use a fake NewRecycler function
plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newAlwaysFailingMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
// Reset a global call counter
failedCallCount = 0
recycler := &PersistentVolumeRecycler{
kubeClient: fake.NewSimpleClientset(),
client: mockClient,
pluginMgr: plugMgr,
syncPeriod: mySyncPeriod,
maximumRetry: myMaximumRetry,
releasedVolumes: make(map[string]releasedVolumeStatus),
}
// myMaximumRetry recycle attempts will fail
testRecycleFailures(t, recycler, mockClient, pv, myMaximumRetry)
// The volume should be failed after myMaximumRetry attempts
err := recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Last step: Recycler failed: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumeFailed {
t.Errorf("Last step: The volume should be Failed, but is %s instead", mockClient.volume.Status.Phase)
}
// Check the cache, it should not have any entry
status, found := recycler.releasedVolumes[pv.Name]
if found {
t.Errorf("Last step: Expected PV to be removed from cache, got %v", status)
}
}
func preparePV() *api.PersistentVolume {
return &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/tmp/data02",
},
},
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
ClaimRef: &api.ObjectReference{
Name: "foo",
Namespace: "bar",
},
},
Status: api.PersistentVolumeStatus{
Phase: api.VolumeReleased,
},
}
}
// Test that `count` successive attempts to recycle a PV fail.
func testRecycleFailures(t *testing.T, recycler *PersistentVolumeRecycler, mockClient *mockBinderClient, pv *api.PersistentVolume, count int) {
for i := 1; i <= count; i++ {
err := recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("STEP %d: Recycler faled: %v", i, err)
}
// Check the status, it should be failed
if mockClient.volume.Status.Phase != api.VolumeReleased {
t.Errorf("STEP %d: The volume should be Released, but is %s instead", i, mockClient.volume.Status.Phase)
}
// Check the failed volume cache
status, found := recycler.releasedVolumes[pv.Name]
if !found {
t.Errorf("STEP %d: cannot find released volume status", i)
}
if status.retryCount != i {
t.Errorf("STEP %d: Expected nr. of attempts to be %d, got %d", i, i, status.retryCount)
}
// call reclaimVolume too early, it should not increment the retryCount
time.Sleep(mySyncPeriod / 2)
err = recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("STEP %d: Recycler failed: %v", i, err)
}
status, found = recycler.releasedVolumes[pv.Name]
if !found {
t.Errorf("STEP %d: cannot find released volume status", i)
}
if status.retryCount != i {
t.Errorf("STEP %d: Expected nr. of attempts to be %d, got %d", i, i, status.retryCount)
}
// Call the next reclaimVolume() after the full mySyncPeriod has elapsed
time.Sleep(mySyncPeriod / 2)
}
}
func newFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
return &failingMockRecycler{
path: spec.PersistentVolume.Spec.HostPath.Path,
errorCount: myMaximumRetry - 1, // fail two times and then successfully recycle the volume
}, nil
}
func newAlwaysFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
return &failingMockRecycler{
path: spec.PersistentVolume.Spec.HostPath.Path,
errorCount: 1000, // always fail
}, nil
}
type failingMockRecycler struct {
path string
// How many times should the recycler fail before returning success.
errorCount int
volume.MetricsNil
}
// Counter of failingMockRecycler.Recycle() calls. Global variable just for
// testing. It's too much code to create a custom volume plugin, which would
// hold this variable.
var failedCallCount = 0
func (r *failingMockRecycler) GetPath() string {
return r.path
}
func (r *failingMockRecycler) Recycle() error {
failedCallCount += 1
if failedCallCount <= r.errorCount {
return fmt.Errorf("Failing for %d. time", failedCallCount)
}
// return nil means recycle passed
return nil
}

View File

@ -0,0 +1,256 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Test single call to syncClaim, expecting provisioning to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncClaim *once*.
// 3. Compare resulting volumes with expected volumes.
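// For orientation, the positional fields of the controllerTest literals
// below appear to be (an assumption read off the literals; the type itself
// is defined in the shared test framework): test name, initial volumes,
// expected volumes, initial claims, expected claims, expected events,
// injected client errors, and the test function to invoke.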
func TestProvisionSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume
"11-1 - successful provision",
novolumes,
newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
},
{
// Provision failure - plugin not found
"11-2 - plugin not found",
novolumes,
novolumes,
newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed"}, noerrors,
testSyncClaim,
},
{
// Provision failure - newProvisioner returns error
"11-3 - newProvisioner failure",
novolumes,
novolumes,
newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim),
},
{
// Provision failure - Provision returns error
"11-4 - provision failure",
novolumes,
novolumes,
newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim),
},
{
// Provision success - there is already a volume available, still
// we provision a new one when requested.
"11-6 - provisioning when there is a volume available",
newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
[]*api.PersistentVolume{
newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
},
newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
noevents, noerrors,
// No provisioning plugin configured - makes the test fail when
// the controller erroneously tries to provision something
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
},
{
// Provision success? - claim is bound before provisioner creates
// a volume.
"11-7 - claim is bound before provisioning",
novolumes,
newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
// The claim would be bound in next syncClaim
newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Create a volume before provisionClaimOperation starts.
// This simulates a parallel controller provisioning the volume.
reactor.lock.Lock()
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)
reactor.volumes[volume.Name] = volume
reactor.lock.Unlock()
}),
},
{
// Provision success - cannot save provisioned PV once,
// second retry succeeds
"11-8 - cannot save provisioned volume",
novolumes,
newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),
noevents,
[]reactorError{
// Inject an error into the first
// kubeclient.PersistentVolumes.Create() call. All other calls
// will succeed.
{"create", "persistentvolumes", errors.New("Mock creation error")},
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
},
{
// Provision success? - cannot save provisioned PV five times,
// volume is deleted and delete succeeds
"11-9 - cannot save provisioned volume, delete succeeds",
novolumes,
novolumes,
newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject errors into five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(operationDelete, []error{nil},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - no plugin found
"11-10 - cannot save provisioned volume, no delete plugin found",
novolumes,
novolumes,
newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject errors into five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
// No deleteCalls are configured, which results in no deleter plugin being available for the volume
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - deleter returns error five times
"11-11 - cannot save provisioned volume, deleter fails",
novolumes,
novolumes,
newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject errors into five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(
operationDelete, []error{
errors.New("Mock deletion error1"),
errors.New("Mock deletion error2"),
errors.New("Mock deletion error3"),
errors.New("Mock deletion error4"),
errors.New("Mock deletion error5"),
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete succeeds 2nd time
"11-12 - cannot save provisioned volume, delete succeeds 2nd time",
novolumes,
novolumes,
newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass),
newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject errors into five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(
operationDelete, []error{
errors.New("Mock deletion error1"),
nil,
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
),
},
}
runSyncTests(t, tests)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
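// A condensed sketch of that loop (pseudocode; syncChanged and
// periodicSyncAll are hypothetical names for the steps runMultisyncTests
// performs, not real functions):
//
//	for calls := 0; calls < limit; calls++ {
//		if syncChanged() { // step 2: re-sync objects changed by earlier calls
//			continue
//		}
//		if !periodicSyncAll() { // step 3: periodic sync of all objects
//			break // step 5: nothing changed; compare with expected state
//		}
//	}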
func TestProvisionMultiSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume with binding
"12-1 - successful provision",
novolumes,
newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted),
noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
},
}
runMultisyncTests(t, tests)
}
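runMultisyncTests itself is not part of this hunk; under the controllerTest shape assumed above, the driver loop the comment describes would look roughly like the sketch below. changedSinceLastSync is a hypothetical helper standing in for the framework's real change detection, which deep-compares API objects and recorded events between rounds:

// Sketch of the multi-sync driver loop, not the real runMultisyncTests.
func runMultisyncSketch(t *testing.T, ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) {
	const maxCalls = 100 // the enforced limit that prevents endless loops
	if err := test.test(ctrl, reactor, test); err != nil { // 1. initial testCall()
		t.Errorf("%s: initial call failed: %v", test.name, err)
		return
	}
	for i := 0; i < maxCalls; i++ {
		// 2.+3. re-sync every volume and claim, covering both "object
		// changed" events and the periodic sync of all objects.
		for _, volume := range reactor.volumes {
			_ = ctrl.syncVolume(volume)
		}
		for _, claim := range reactor.claims {
			_ = ctrl.syncClaim(claim)
		}
		if !reactor.changedSinceLastSync() { // hypothetical change detector
			return // 5. stable state; the caller compares the final objects
		}
	}
	t.Errorf("%s: no stable state after %d iterations", test.name, maxCalls)
}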


@ -0,0 +1,196 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Test single call to syncVolume, expecting recycling to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) {
tests := []controllerTest{
{
// recycle volume bound by controller
"6-1 - successful recycle",
newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController),
newVolumeArray("volume6-1", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle volume bound by user
"6-2 - successful recycle with prebound volume",
newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-2", "1Gi", "", "claim6-2", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle failure - plugin not found
"6-3 - plugin not found",
newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeFailed, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume,
},
{
// recycle failure - newRecycler returns error
"6-4 - newRecycler returns error",
newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeFailed, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume),
},
{
// recycle failure - recycle returns error
"6-5 - recycle returns error",
newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeFailed, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
},
{
// recycle success(?) - volume is deleted before doRecycle() starts
"6-6 - volume is deleted before recycling",
newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before recycle operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume6-6")
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume is recycled by previous recycler just
// at the time new doRecycle() starts. This simulates "volume no
// longer needs recycling, skipping".
"6-7 - volume is deleted before recycling",
newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController),
newVolumeArray("volume6-7", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-7"]
volume.Spec.ClaimRef = nil
volume.Status.Phase = api.VolumeAvailable
volume.Annotations = nil
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume bound by user is recycled by previous
// recycler just at the time new doRecycle() starts. This simulates
// "volume no longer needs recycling, skipping" with volume bound by
// user.
"6-8 - prebound volume is deleted before recycling",
newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-8", "1Gi", "", "claim6-8", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-8"]
volume.Spec.ClaimRef.UID = ""
volume.Status.Phase = api.VolumeAvailable
reactor.lock.Unlock()
}),
},
{
// recycle success - volume bound by user is recycled, while a new
// claim is created with another UID.
"6-9 - prebound volume is recycled while the claim exists",
newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume6-9", "1Gi", "", "claim6-9", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending),
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
},
{
// volume has unknown reclaim policy - failure expected
"6-10 - unknown reclaim policy",
newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeBound, "Unknown"),
newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeFailed, "Unknown"),
noclaims,
noclaims,
[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) {
tests := []controllerTest{
{
// recycle failure - recycle returns error. The controller should
// try again.
"7-1 - recycle returns error",
newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
newVolumeArray("volume7-1", "1Gi", "", "claim7-1", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests)
}
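Neither wrapTestWithControllerConfig nor wrapTestWithInjectedOperation appears in this hunk, so the following is only a plausible sketch of the first: it arms the controller's fake volume plugin with a scripted result list for one operation and then runs the wrapped test. The mockVolumePlugin type and its fields are assumptions; note how an empty error list models "the constructor itself fails", as in test 6-4 above:

// Plausible sketch of the error-injection wrapper; each faked call consumes
// the next entry, nil meaning success. All names here are assumptions.
func wrapTestSketch(operation string, errs []error, toWrap func(*PersistentVolumeController, *volumeReactor, controllerTest) error) func(*PersistentVolumeController, *volumeReactor, controllerTest) error {
	return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
		plugin := &mockVolumePlugin{} // hypothetical fake plugin
		switch operation {
		case operationRecycle:
			plugin.recycleCalls = errs // empty: newRecycler returns an error
		case operationDelete:
			plugin.deleteCalls = errs
		case operationProvision:
			plugin.provisionCalls = errs
		}
		ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)
		return toWrap(ctrl, reactor, test)
	}
}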


@ -0,0 +1,73 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
// VolumeHost interface implementation for PersistentVolumeController.
var _ vol.VolumeHost = &PersistentVolumeController{}
// The controller runs no kubelet and never mounts volumes itself, so the
// path-related methods (and the mounter/writer below) are deliberate stubs.
func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
return ctrl.kubeClient
}
func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperUnmounter is not implemented")
}
func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
return ctrl.cloud
}
func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
return nil
}
func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
return nil
}
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
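This implementation exists only so the controller can initialize volume plugins with itself as the host; in practice just the kube client and the cloud provider carry real values. A hypothetical plugin-side snippet showing what is and is not usable here:

// Hypothetical illustration of which parts of this VolumeHost are real
// when a plugin runs inside the PV controller.
func inspectHost(host vol.VolumeHost) {
	if host.GetKubeClient() != nil {
		fmt.Println("API access: available (used by recyclers and provisioners)")
	}
	if host.GetCloudProvider() != nil {
		fmt.Println("cloud provider: available (used for cloud disks)")
	}
	// Always empty or nil: the controller never mounts volumes itself.
	fmt.Printf("plugin dir: %q, mounter: %v\n",
		host.GetPluginDir("kubernetes.io/aws-ebs"), host.GetMounter())
}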


@ -774,6 +774,8 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri
capacity = storage.String()
}
events, _ := d.Events(namespace).Search(pvc)
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "Name:\t%s\n", pvc.Name)
fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace)
@ -782,6 +784,10 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri
printLabelsMultiline(out, "Labels", pvc.Labels)
fmt.Fprintf(out, "Capacity:\t%s\n", capacity)
fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes)
if events != nil {
DescribeEvents(events, out)
}
return nil
})
}


@ -408,14 +408,35 @@ type awsElasticBlockStoreProvisioner struct {
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
func (c *awsElasticBlockStoreProvisioner) Provision(pv *api.PersistentVolume) error {
func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
return err
return nil, err
}
pv.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID = volumeID
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeID: volumeID,
FSType: "ext4",
Partition: 0,
ReadOnly: false,
},
},
},
}
if len(labels) != 0 {
@ -427,34 +448,5 @@ func (c *awsElasticBlockStoreProvisioner) Provision(pv *api.PersistentVolume) er
}
}
return nil
}
func (c *awsElasticBlockStoreProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
// Provide dummy api.PersistentVolume.Spec, it will be filled in
// awsElasticBlockStoreProvisioner.Provision()
return &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "pv-aws-",
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): c.options.Capacity,
},
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeID: volume.ProvisionedVolumeName,
FSType: "ext4",
Partition: 0,
ReadOnly: false,
},
},
},
}, nil
return pv, nil
}
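The new contract moves persistence to the caller: the plugin returns a complete PV and the controller saves it, retrying the save and deleting the freshly created volume if saving keeps failing, which is exactly what the 11-11/11-12 cases at the top of this diff exercise. A condensed sketch of that caller-side flow, with assumed names (the real controller spreads this over several methods):

// Condensed sketch of how the controller consumes the one-shot Provision().
func provisionAndSave(ctrl *PersistentVolumeController, provisioner vol.Provisioner, deleter vol.Deleter) error {
	pv, err := provisioner.Provision() // plugin allocates storage and builds the PV
	if err != nil {
		return err // surfaces as "Warning ProvisioningFailed"
	}
	const createRetries = 5 // assumed retry count, matching the tests above
	for i := 0; i < createRetries; i++ {
		if _, err = ctrl.kubeClient.Core().PersistentVolumes().Create(pv); err == nil {
			return nil // saved; the next syncClaim binds it to the claim
		}
	}
	// The PV object could not be saved: release the storage asset so it
	// does not leak; failures here surface as ProvisioningCleanupFailed.
	return deleter.Delete()
}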


@ -220,14 +220,7 @@ func TestPlugin(t *testing.T) {
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.NewPersistentVolumeTemplate()
if err != nil {
t.Errorf("NewPersistentVolumeTemplate() failed: %v", err)
}
// get 2nd Provisioner - persistent volume controller will do the same
provisioner, err = plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
err = provisioner.Provision(persistentSpec)
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}


@ -427,25 +427,16 @@ type cinderVolumeProvisioner struct {
var _ volume.Provisioner = &cinderVolumeProvisioner{}
func (c *cinderVolumeProvisioner) Provision(pv *api.PersistentVolume) error {
func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
volumeID, sizeGB, err := c.manager.CreateVolume(c)
if err != nil {
return err
return nil, err
}
pv.Spec.PersistentVolumeSource.Cinder.VolumeID = volumeID
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
return nil
}
func (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
// Provide dummy api.PersistentVolume.Spec, it will be filled in
// cinderVolumeProvisioner.Provision()
return &api.PersistentVolume{
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "pv-cinder-",
Labels: map[string]string{},
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "cinder-dynamic-provisioner",
},
@ -454,16 +445,16 @@ func (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.Persistent
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): c.options.Capacity,
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
Cinder: &api.CinderVolumeSource{
VolumeID: volume.ProvisionedVolumeName,
VolumeID: volumeID,
FSType: "ext4",
ReadOnly: false,
},
},
},
}, nil
}
return pv, nil
}


@ -212,14 +212,7 @@ func TestPlugin(t *testing.T) {
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
persistentSpec, err := provisioner.NewPersistentVolumeTemplate()
if err != nil {
t.Errorf("NewPersistentVolumeTemplate() failed: %v", err)
}
// get 2nd Provisioner - persistent volume controller will do the same
provisioner, err = plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
err = provisioner.Provision(persistentSpec)
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}


@ -370,14 +370,35 @@ type gcePersistentDiskProvisioner struct {
var _ volume.Provisioner = &gcePersistentDiskProvisioner{}
func (c *gcePersistentDiskProvisioner) Provision(pv *api.PersistentVolume) error {
func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
return err
return nil, err
}
pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName = volumeID
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: volumeID,
FSType: "ext4",
Partition: 0,
ReadOnly: false,
},
},
},
}
if len(labels) != 0 {
@ -389,34 +410,5 @@ func (c *gcePersistentDiskProvisioner) Provision(pv *api.PersistentVolume) error
}
}
return nil
}
func (c *gcePersistentDiskProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
// Provide dummy api.PersistentVolume.Spec, it will be filled in
// gcePersistentDiskProvisioner.Provision()
return &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "pv-gce-",
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): c.options.Capacity,
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: volume.ProvisionedVolumeName,
FSType: "ext4",
Partition: 0,
ReadOnly: false,
},
},
},
}, nil
return pv, nil
}


@ -216,14 +216,7 @@ func TestPlugin(t *testing.T) {
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.NewPersistentVolumeTemplate()
if err != nil {
t.Errorf("NewPersistentVolumeTemplate() failed: %v", err)
}
// get 2nd Provisioner - persistent volume controller will do the same
provisioner, err = plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
err = provisioner.Provision(persistentSpec)
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}


@ -252,18 +252,12 @@ type hostPathProvisioner struct {
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision(pv *api.PersistentVolume) error {
if pv.Spec.HostPath == nil {
return fmt.Errorf("pv.Spec.HostPath cannot be nil")
}
return os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
func (r *hostPathProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
return &api.PersistentVolume{
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "pv-hostpath-",
Name: r.options.PVName,
Annotations: map[string]string{
"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
},
@ -280,7 +274,9 @@ func (r *hostPathProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolu
},
},
},
}, nil
}
return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
// hostPathDeleter deletes a hostPath PV from the cluster.


@ -163,7 +163,7 @@ func TestProvisioner(t *testing.T) {
if err != nil {
t.Errorf("Failed to make a new Provisioner: %v", err)
}
pv, err := creater.NewPersistentVolumeTemplate()
pv, err := creater.Provision()
if err != nil {
t.Errorf("Unexpected error creating volume: %v", err)
}


@ -340,11 +340,12 @@ type FakeProvisioner struct {
Host VolumeHost
}
func (fc *FakeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
return &api.PersistentVolume{
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "pv-fakeplugin-",
Name: fc.Options.PVName,
Annotations: map[string]string{
"kubernetes.io/createdby": "fakeplugin-provisioner",
},
@ -361,11 +362,9 @@ func (fc *FakeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume,
},
},
},
}, nil
}
}
func (fc *FakeProvisioner) Provision(pv *api.PersistentVolume) error {
return nil
return pv, nil
}
// FindEmptyDirectoryUsageOnTmpfs finds the expected usage of an empty directory existing on


@ -111,13 +111,10 @@ type Recycler interface {
// Provisioner is an interface that creates PersistentVolumes by allocating
// the underlying volume as a new resource in the infrastructure provider.
type Provisioner interface {
// Provision creates the resource by allocating the underlying volume in a storage system.
// This method should block until completion.
Provision(*api.PersistentVolume) error
// NewPersistentVolumeTemplate creates a new PersistentVolume to be used as a template before saving.
// The provisioner will want to tweak its properties, assign correct annotations, etc.
// This func should *NOT* persist the PV in the API. That is left to the caller.
NewPersistentVolumeTemplate() (*api.PersistentVolume, error)
// Provision creates the resource by allocating the underlying volume in a
// storage system. This method should block until completion and returns
// PersistentVolume representing the created storage resource.
Provision() (*api.PersistentVolume, error)
}
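For plugin authors the change collapses the old two-step contract into a single call. Side by side, with the removed methods reproduced as oldProvisioner for comparison:

// The pre-PR contract, reproduced from the methods removed above.
type oldProvisioner interface {
	NewPersistentVolumeTemplate() (*api.PersistentVolume, error)
	Provision(*api.PersistentVolume) error
}

// Old flow: build a dummy PV template, then mutate it in place.
func provisionOld(p oldProvisioner) (*api.PersistentVolume, error) {
	pv, err := p.NewPersistentVolumeTemplate()
	if err != nil {
		return nil, err
	}
	return pv, p.Provision(pv)
}

// New flow: one call returns the finished PV; persisting it in the API
// server is entirely the caller's job.
func provisionNew(p Provisioner) (*api.PersistentVolume, error) {
	return p.Provision()
}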
// Deleter removes the resource from the underlying storage provider. Calls to this method should block until


@ -49,21 +49,15 @@ func TestPersistentVolumeRecycler(t *testing.T) {
deleteAllEtcdKeys()
// Use higher QPS and Burst, there is a test for race condition below, which
// creates many claims and default values were too low.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
recyclerClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}, 0, 0, nil, nil, nil, nil}}
cloud := &fake_cloud.FakeCloud{}
binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 10*time.Second)
binder.Run()
defer binder.Stop()
recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 30*time.Second, 3, plugins, cloud)
recycler.Run()
defer recycler.Stop()
ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "", nil, nil, nil)
ctrl.Run()
defer ctrl.Stop()
// This PV will be claimed, released, and recycled.
pv := &api.PersistentVolume{