diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD
index eaef2490e52..3873ca2f630 100644
--- a/pkg/controller/volume/persistentvolume/BUILD
+++ b/pkg/controller/volume/persistentvolume/BUILD
@@ -31,6 +31,7 @@ go_library(
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/volumehelper:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/storage/v1:go_default_library",
@@ -41,6 +42,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
@@ -95,6 +97,7 @@ go_test(
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
+        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//vendor/k8s.io/client-go/testing:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go
index 4a3f5b76772..9dba23efdf6 100644
--- a/pkg/controller/volume/persistentvolume/pv_controller.go
+++ b/pkg/controller/volume/persistentvolume/pv_controller.go
@@ -26,6 +26,8 @@ import (
 	storage "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -43,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
 	vol "k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 
 	"github.com/golang/glog"
 )
@@ -1067,6 +1070,17 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
 		glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name)
 		return
 	}
+	pods, used, err := ctrl.isVolumeUsed(newVolume)
+	if err != nil {
+		glog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err)
+		return
+	}
+	if used {
+		msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ","))
+		glog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg)
+		ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg)
+		return
+	}
 
 	// Use the newest volume copy, this will save us from version conflicts on
 	// saving.
@@ -1235,6 +1249,32 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo
 	return true, nil
 }
 
+// isVolumeUsed returns a list of non-terminated pods that use the given PV
+// and true when there is at least one such pod.
+func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([]string, bool, error) {
+	if pv.Spec.ClaimRef == nil {
+		return nil, false, nil
+	}
+	claimName := pv.Spec.ClaimRef.Name
+
+	podNames := sets.NewString()
+	pods, err := ctrl.podLister.Pods(pv.Spec.ClaimRef.Namespace).List(labels.Everything())
+	if err != nil {
+		return nil, false, fmt.Errorf("error listing pods: %s", err)
+	}
+	for _, pod := range pods {
+		if volumehelper.IsPodTerminated(pod, pod.Status) {
+			continue
+		}
+		for i := range pod.Spec.Volumes {
+			usedPV := &pod.Spec.Volumes[i]
+			if usedPV.PersistentVolumeClaim != nil && usedPV.PersistentVolumeClaim.ClaimName == claimName {
+				podNames.Insert(pod.Namespace + "/" + pod.Name)
+			}
+		}
+	}
+	return podNames.List(), podNames.Len() != 0, nil
+}
+
 // doDeleteVolume finds appropriate delete plugin and deletes given volume. It
 // returns 'true', when the volume was deleted and 'false' when the volume
 // cannot be deleted because of the deleter is external. No error should be
diff --git a/pkg/controller/volume/persistentvolume/recycle_test.go b/pkg/controller/volume/persistentvolume/recycle_test.go
index 80c44703943..73426c44046 100644
--- a/pkg/controller/volume/persistentvolume/recycle_test.go
+++ b/pkg/controller/volume/persistentvolume/recycle_test.go
@@ -22,6 +22,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // Test single call to syncVolume, expecting recycling to happen.
@@ -29,6 +30,44 @@ import (
 // 2. Call the syncVolume *once*.
 // 3. Compare resulting volumes with expected volumes.
 func TestRecycleSync(t *testing.T) {
+	runningPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "runningPod",
+			Namespace: testNamespace,
+		},
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					Name: "vol1",
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "runningClaim",
+						},
+					},
+				},
+			},
+		},
+		Status: v1.PodStatus{
+			Phase: v1.PodRunning,
+		},
+	}
+
+	pendingPod := runningPod.DeepCopy()
+	pendingPod.Name = "pendingPod"
+	pendingPod.Status.Phase = v1.PodPending
+	pendingPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "pendingClaim"
+
+	completedPod := runningPod.DeepCopy()
+	completedPod.Name = "completedPod"
+	completedPod.Status.Phase = v1.PodSucceeded
+	completedPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "completedClaim"
+
+	pods := []*v1.Pod{
+		runningPod,
+		pendingPod,
+		completedPod,
+	}
+
 	tests := []controllerTest{
 		{
 			// recycle volume bound by controller
@@ -160,8 +199,38 @@ func TestRecycleSync(t *testing.T) {
 			noclaims,
 			[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
 		},
+		{
+			// volume is used by a running pod - failure expected
+			"6-11 - used by running pod",
+			newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
+			newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
+			noclaims,
+			noclaims,
+			[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
+		},
+		{
+			// volume is used by a pending pod - failure expected
+			"6-12 - used by pending pod",
+			newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
+			newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
+			noclaims,
+			noclaims,
+			[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
+		},
+		{
+			// volume is used by a completed pod - recycle succeeds
+			"6-13 - used by completed pod",
+			newVolumeArray("volume6-13", "1Gi", "uid6-13", "completedClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
+			newVolumeArray("volume6-13", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
+			noclaims,
+			noclaims,
+			noevents, noerrors,
+			// Inject recycler into the controller and call syncVolume. The
+			// recycler simulates one recycle() call that succeeds.
+			wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
+		},
 	}
-	runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
+	runSyncTests(t, tests, []*storage.StorageClass{}, pods)
 }
 
 // Test multiple calls to syncClaim/syncVolume and periodic sync of all
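The substance of the patch is the pod scan in isVolumeUsed: a released PV is not recycled while any non-terminated pod in the claim's namespace still mounts the bound claim. recycleVolumeOperation only logs, records a VolumeFailedRecycle event (with v1.EventTypeNormal, which is exactly what the tests assert as "Normal VolumeFailedRecycle"), and returns, so the controller retries on a later sync. Below is a minimal standalone sketch of that check, not part of the patch: podsUsingClaim and the simplified isPodTerminated helper are assumed names standing in for the controller's pod lister and volumehelper.IsPodTerminated (the real helper additionally treats a deleted pod whose containers have all stopped as terminated).

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isPodTerminated is a simplified stand-in for volumehelper.IsPodTerminated:
// a pod in Succeeded or Failed phase no longer uses its volumes.
func isPodTerminated(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
}

// podsUsingClaim collects "namespace/name" of every non-terminated pod that
// mounts a PVC named claimName, mirroring the loop in isVolumeUsed (with a
// plain pod slice in place of ctrl.podLister).
func podsUsingClaim(pods []*v1.Pod, claimName string) []string {
	var used []string
	for _, pod := range pods {
		if isPodTerminated(pod) {
			continue // terminated pods do not block recycling
		}
		for i := range pod.Spec.Volumes {
			pvc := pod.Spec.Volumes[i].PersistentVolumeClaim
			if pvc != nil && pvc.ClaimName == claimName {
				used = append(used, pod.Namespace+"/"+pod.Name)
			}
		}
	}
	return used
}

func main() {
	running := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "runningPod", Namespace: "default"},
		Spec: v1.PodSpec{Volumes: []v1.Volume{{
			Name: "vol1",
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "runningClaim"},
			},
		}}},
		Status: v1.PodStatus{Phase: v1.PodRunning},
	}
	completed := running.DeepCopy()
	completed.Name = "completedPod"
	completed.Status.Phase = v1.PodSucceeded

	// Only the running pod blocks recycling; the completed pod is skipped,
	// matching test case 6-13 above, where the volume recycles successfully.
	fmt.Println(podsUsingClaim([]*v1.Pod{running, completed}, "runningClaim"))
	// prints: [default/runningPod]
}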