Added volume.Deleter interface and simple HostPath implementation

markturansky 2015-09-07 12:11:37 -04:00
parent c2ffe68d0f
commit c2de9e9647
13 changed files with 270 additions and 19 deletions

View File

@@ -12090,7 +12090,7 @@
},
"hostPath": {
"$ref": "v1.HostPathVolumeSource",
"description": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for development and testing only. On-host storage is not supported in any way. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"
"description": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath"
},
"glusterfs": {
"$ref": "v1.GlusterfsVolumeSource",

View File

@@ -108,7 +108,7 @@ The reclaim policy for a `PersistentVolume` tells the cluster what to do with th
* iSCSI
* RBD (Ceph Block Device)
* Glusterfs
* HostPath (single node testing only)
* HostPath (single node testing only -- local storage is not supported in any way and WILL NOT WORK in a multi-node cluster)
## Persistent Volumes

View File

@@ -244,8 +244,9 @@ type PersistentVolumeSource struct {
// kubelet's host machine and then exposed to the pod.
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"`
// HostPath represents a directory on the host.
// This is useful for development and testing only.
// on-host storage is not supported in any way
// Provisioned by a developer or tester.
// This is useful for single-node development and testing only!
// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
HostPath *HostPathVolumeSource `json:"hostPath,omitempty"`
// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"`
@@ -303,12 +304,9 @@ const (
// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
// The volume plugin must support Recycling.
PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
// The volume plugin must support Deletion.
// TODO: implement w/ DeletableVolumePlugin
// PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
// The default policy is Retain.
PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
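For illustration, the new Delete policy is set on a PV exactly like the existing Recycle and Retain policies. A minimal sketch using the api types touched by this commit (the name, path, and capacity are placeholders; assumes the k8s.io/kubernetes/pkg/api and pkg/api/resource imports):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// A PV that opts into the new Delete reclaim policy. HostPath is for
	// single-node development and testing only.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "example-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/example-pv"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
		},
	}
	fmt.Printf("%s will be deleted on release: %s\n", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy)
}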

View File

@@ -314,8 +314,8 @@ type PersistentVolumeSource struct {
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"`
// HostPath represents a directory on the host.
// Provisioned by a developer or tester.
// This is useful for development and testing only.
// On-host storage is not supported in any way.
// This is useful for single-node development and testing only!
// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
HostPath *HostPathVolumeSource `json:"hostPath,omitempty"`
// Glusterfs represents a Glusterfs volume that is attached to a host and
@@ -388,12 +388,9 @@ const (
// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
// The volume plugin must support Recycling.
PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
// The volume plugin must support Deletion.
// TODO: implement w/ DeletableVolumePlugin
// PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
// The default policy is Retain.
PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"

View File

@@ -846,7 +846,7 @@ var map_PersistentVolumeSource = map[string]string{
"": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
"gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
"awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
"hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for development and testing only. On-host storage is not supported in any way. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath",
"hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath",
"glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md",
"nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",

View File

@@ -229,6 +229,8 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
} else {
// another process is watching for released volumes.
// PersistentVolumeReclaimPolicy is set per PersistentVolume
// Recycle - sets the PV to Pending and back under this controller's management
// Delete - delete events are handled by this controller's watch. PVs are removed from the index.
}
// volumes are removed by processes external to this binder and must be removed from the cluster

View File

@@ -99,12 +99,13 @@ func (recycler *PersistentVolumeRecycler) reclaimVolume(pv *api.PersistentVolume
return fmt.Errorf("PersistentVolume[%s] phase is %s, expected %s. Skipping.", pv.Name, latest.Status.Phase, api.VolumeReleased)
}
// handleRecycle blocks until completion
// both handleRecycle and handleDelete block until completion
// TODO: allow parallel recycling operations to increase throughput
// TODO implement handleDelete in a separate PR w/ cloud volumes
switch pv.Spec.PersistentVolumeReclaimPolicy {
case api.PersistentVolumeReclaimRecycle:
err = recycler.handleRecycle(pv)
case api.PersistentVolumeReclaimDelete:
err = recycler.handleDelete(pv)
case api.PersistentVolumeReclaimRetain:
glog.V(5).Infof("Volume %s is set to retain after release. Skipping.\n", pv.Name)
default:
@@ -161,6 +162,49 @@ func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume
return nil
}
func (recycler *PersistentVolumeRecycler) handleDelete(pv *api.PersistentVolume) error {
glog.V(5).Infof("Deleting PersistentVolume[%s]\n", pv.Name)
currentPhase := pv.Status.Phase
nextPhase := currentPhase
spec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := recycler.pluginMgr.FindDeletablePluginBySpec(spec)
if err != nil {
return fmt.Errorf("Could not find deletable volume plugin for spec: %+v", err)
}
deleter, err := plugin.NewDeleter(spec)
if err != nil {
return fmt.Errorf("could not obtain Deleter for spec: %+v", err)
}
// blocks until completion
err = deleter.Delete()
if err != nil {
glog.Errorf("PersistentVolume[%s] failed deletion: %+v", pv.Name, err)
pv.Status.Message = fmt.Sprintf("Deletion error: %s", err)
nextPhase = api.VolumeFailed
} else {
glog.V(5).Infof("PersistentVolume[%s] successfully deleted through plugin\n", pv.Name)
// after successful deletion through the plugin, we can also remove the PV from the cluster
err = recycler.client.DeletePersistentVolume(pv)
if err != nil {
return fmt.Errorf("error deleting persistent volume: %+v", err)
}
}
if currentPhase != nextPhase {
glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase)
pv.Status.Phase = nextPhase
_, err := recycler.client.UpdatePersistentVolumeStatus(pv)
if err != nil {
// Rollback to previous phase
pv.Status.Phase = currentPhase
}
}
return nil
}
// Run starts this recycler's control loops
func (recycler *PersistentVolumeRecycler) Run() {
glog.V(5).Infof("Starting PersistentVolumeRecycler\n")
@@ -183,6 +227,7 @@ func (recycler *PersistentVolumeRecycler) Stop() {
type recyclerClient interface {
GetPersistentVolume(name string) (*api.PersistentVolume, error)
UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
DeletePersistentVolume(volume *api.PersistentVolume) error
UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
}
@@ -202,6 +247,10 @@ func (c *realRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume
return c.client.PersistentVolumes().Update(volume)
}
func (c *realRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
return c.client.PersistentVolumes().Delete(volume.Name)
}
func (c *realRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
return c.client.PersistentVolumes().UpdateStatus(volume)
}
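For unit tests around reclaim behavior, the recyclerClient interface above is small enough to fake in memory. A sketch under the assumption that tests only need Get/Update/Delete bookkeeping (the fakeRecyclerClient type and its fields are hypothetical, not part of this commit; fmt and api are assumed imported in this file):

// fakeRecyclerClient records PV mutations in memory instead of calling the API server.
type fakeRecyclerClient struct {
	volumes map[string]*api.PersistentVolume
	deleted []string // names of PVs removed via DeletePersistentVolume
}

func (c *fakeRecyclerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
	pv, ok := c.volumes[name]
	if !ok {
		return nil, fmt.Errorf("persistent volume %q not found", name)
	}
	return pv, nil
}

func (c *fakeRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	c.volumes[volume.Name] = volume
	return volume, nil
}

func (c *fakeRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
	delete(c.volumes, volume.Name)
	c.deleted = append(c.deleted, volume.Name)
	return nil
}

func (c *fakeRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	c.volumes[volume.Name] = volume
	return volume, nil
}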

View File

@@ -18,6 +18,8 @@ package host_path
import (
"fmt"
"os"
"regexp"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
@@ -34,6 +36,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin
&hostPathPlugin{
host: nil,
newRecyclerFunc: newRecycler,
newDeleterFunc: newDeleter,
config: volumeConfig,
},
}
@@ -53,12 +56,15 @@ type hostPathPlugin struct {
host volume.VolumeHost
// decouple creating recyclers by deferring to a function. Allows for easier testing.
newRecyclerFunc func(spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
config volume.VolumeConfig
// decouple creating deleters by deferring to a function. Allows for easier testing.
newDeleterFunc func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
config volume.VolumeConfig
}
var _ volume.VolumePlugin = &hostPathPlugin{}
var _ volume.PersistentVolumePlugin = &hostPathPlugin{}
var _ volume.RecyclableVolumePlugin = &hostPathPlugin{}
var _ volume.DeletableVolumePlugin = &hostPathPlugin{}
const (
hostPathPluginName = "kubernetes.io/host-path"
@@ -105,6 +111,10 @@ func (plugin *hostPathPlugin) NewRecycler(spec *volume.Spec) (volume.Recycler, e
return plugin.newRecyclerFunc(spec, plugin.host, plugin.config)
}
func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterFunc(spec, plugin.host)
}
func newRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
@@ -118,6 +128,13 @@ func newRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.Volume
}, nil
}
func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
}
return &hostPathDeleter{spec.Name(), spec.PersistentVolume.Spec.HostPath.Path, host}, nil
}
// HostPath volumes represent a bare host file or directory mount.
// The directory at the specified path will be directly exposed to the container.
type hostPath struct {
@@ -198,3 +215,26 @@ func (r *hostPathRecycler) Recycle() error {
}
return volume.RecycleVolumeByWatchingPodUntilCompletion(pod, r.host.GetKubeClient())
}
// hostPathDeleter deletes a hostPath PV from the cluster.
// This deleter only works in a single-node cluster and is intended for testing purposes only.
type hostPathDeleter struct {
name string
path string
host volume.VolumeHost
}
func (r *hostPathDeleter) GetPath() string {
return r.path
}
// Delete for hostPath removes the local directory so long as it is beneath /tmp/*.
// THIS IS FOR TESTING AND LOCAL DEVELOPMENT ONLY! This message should scare you away from using
// this deleter for anything other than development and testing.
func (r *hostPathDeleter) Delete() error {
re := regexp.MustCompile("/tmp/.+")
if !re.MatchString(r.GetPath()) {
return fmt.Errorf("host_path deleter only supports paths matching /tmp/.+ but received %s", r.GetPath())
}
return os.RemoveAll(r.GetPath())
}
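End to end, deletion of a released HostPath PV flows through the plugin manager exactly as handleDelete does above. A minimal standalone sketch of that lookup-and-delete sequence (the /tmp/scratch path is a placeholder; error handling is shortened, and the fake volume host mirrors the tests in this commit):

package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/volume"
	host_path "k8s.io/kubernetes/pkg/volume/host_path"
)

func main() {
	// Register the HostPath plugin, as the recycler's plugin manager does.
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	// A released PV backed by a scratch directory beneath /tmp.
	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/scratch"}}}}}

	// FindDeletablePluginBySpec -> NewDeleter -> Delete, mirroring handleDelete.
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		log.Fatalf("no deletable plugin matched: %v", err)
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		log.Fatalf("could not obtain Deleter: %v", err)
	}
	if err := deleter.Delete(); err != nil { // blocks until completion
		log.Fatalf("deletion failed: %v", err)
	}
	fmt.Println("deleted", deleter.GetPath())
}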

View File

@@ -17,12 +17,15 @@ limitations under the License.
package host_path
import (
"fmt"
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/volume"
)
@@ -64,7 +67,7 @@ func TestGetAccessModes(t *testing.T) {
func TestRecycler(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
pluginHost := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler, volume.VolumeConfig{}}}, pluginHost)
plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler, nil, volume.VolumeConfig{}}}, pluginHost)
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}}
plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
@@ -83,6 +86,63 @@ func TestRecycler(t *testing.T) {
}
}
func TestDeleter(t *testing.T) {
tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
defer os.RemoveAll(tempPath)
err := os.MkdirAll(tempPath, 0750)
if err != nil {
t.Fatalf("Failed to create tmp directory for deleter: %v", err)
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindDeletablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
deleter, err := plug.NewDeleter(spec)
if err != nil {
t.Errorf("Failed to make a new Deleter: %v", err)
}
if deleter.GetPath() != tempPath {
t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
}
if err := deleter.Delete(); err != nil {
t.Errorf("Mock Recycler expected to return nil but got %s", err)
}
if exists, _ := util.FileExists("foo"); exists {
t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
}
}
func TestDeleterTempDir(t *testing.T) {
tests := map[string]struct {
expectedFailure bool
path string
}{
"just-tmp": {true, "/tmp"},
"not-tmp": {true, "/nottmp"},
"good-tmp": {false, "/tmp/scratch"},
}
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
err := deleter.Delete()
if err == nil && test.expectedFailure {
t.Errorf("Expected failure for test '%s' but got nil err", name)
}
if err != nil && !test.expectedFailure {
t.Errorf("Unexpected failure for test '%s': %v", name, err)
}
}
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))

View File

@@ -89,6 +89,15 @@ type RecyclableVolumePlugin interface {
NewRecycler(spec *Spec) (Recycler, error)
}
// DeletableVolumePlugin is an extended interface of VolumePlugin and is used by persistent volumes that want
// to be deleted from the cluster after their release from a PersistentVolumeClaim.
type DeletableVolumePlugin interface {
VolumePlugin
// NewDeleter creates a new volume.Deleter which knows how to delete this resource
// in accordance with the underlying storage provider after the volume's release from a claim
NewDeleter(spec *Spec) (Deleter, error)
}
// VolumeHost is an interface that plugins can use to access the kubelet.
type VolumeHost interface {
// GetPluginDir returns the absolute path to a directory under which
@@ -316,6 +325,19 @@ func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVol
return nil, fmt.Errorf("no recyclable volume plugin matched")
}
// FindDeletablePluginBySpec fetches a persistent volume plugin by spec. If no plugin
// is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
return deletableVolumePlugin, nil
}
return nil, fmt.Errorf("no deletable volume plugin matched")
}
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler pod. By default, a recycler pod simply runs
// "rm -rf" on a volume and tests for emptiness. Most attributes of the template will be correct for most
// plugin implementations. The following attributes can be overridden per plugin via configuration:

View File

@@ -129,6 +129,10 @@ func (plugin *FakeVolumePlugin) NewRecycler(spec *Spec) (Recycler, error) {
return &fakeRecycler{"/attributesTransferredFromSpec"}, nil
}
func (plugin *FakeVolumePlugin) NewDeleter(spec *Spec) (Deleter, error) {
return &FakeDeleter{"/attributesTransferredFromSpec"}, nil
}
func (plugin *FakeVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{}
}
@@ -184,3 +188,16 @@ func NewFakeRecycler(spec *Spec, host VolumeHost, config VolumeConfig) (Recycler
path: spec.PersistentVolume.Spec.HostPath.Path,
}, nil
}
type FakeDeleter struct {
path string
}
func (fd *FakeDeleter) Delete() error {
// nil is success, else error
return nil
}
func (fd *FakeDeleter) GetPath() string {
return fd.path
}
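Since FakeVolumePlugin and FakeDeleter now back the deleter code paths in tests, compile-time interface assertions keep them from silently drifting away from the real interfaces. A small sketch of checks that could sit next to the fakes (illustrative additions, not in this commit):

// Compile-time checks that the fakes satisfy the new interfaces.
var _ Deleter = &FakeDeleter{}
var _ DeletableVolumePlugin = &FakeVolumePlugin{}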

View File

@@ -65,6 +65,14 @@ type Recycler interface {
Recycle() error
}
// Deleter removes the resource from the underlying storage provider. Calls to Delete should block until
// the deletion is complete. Any error returned indicates the volume has failed to be reclaimed.
// A nil return indicates success.
type Deleter interface {
Volume
Delete() error
}
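Because Delete is specified to block until completion, a slow or wedged storage provider can stall the caller indefinitely. A hedged sketch of a deadline guard (deleteWithTimeout is a hypothetical helper, not part of this commit; assumes fmt and time are imported; on timeout the worker goroutine is left to finish in the background):

// deleteWithTimeout runs a blocking Deleter.Delete under a deadline.
func deleteWithTimeout(d Deleter, timeout time.Duration) error {
	errCh := make(chan error, 1) // buffered so the goroutine's send never blocks
	go func() { errCh <- d.Delete() }()
	select {
	case err := <-errCh:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("timed out deleting volume at %s after %v", d.GetPath(), timeout)
	}
}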
func RenameDirectory(oldPath, newName string) (string, error) {
newPath, err := ioutil.TempDir(path.Dir(oldPath), newName)
if err != nil {

View File

@@ -269,3 +269,61 @@ func waitForPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumeP
}
}
}
func TestPersistentVolumeDeleter(t *testing.T) {
_, s := runAMaster(t)
defer s.Close()
deleteAllEtcdKeys()
client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second)
binder.Run()
defer binder.Stop()
recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(client, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
recycler.Run()
defer recycler.Stop()
// This PV will be claimed, released, and deleted.
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
},
}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
Spec: api.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
},
}
w, _ := client.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
defer w.Stop()
_, _ = client.PersistentVolumes().Create(pv)
_, _ = client.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
// wait until the binder pairs the volume and claim
waitForPersistentVolumePhase(w, api.VolumeBound)
// deleting a claim releases the volume, after which it can be deleted
if err := client.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
waitForPersistentVolumePhase(w, api.VolumeReleased)
for {
event := <-w.ResultChan()
if event.Type == watch.Deleted {
break
}
}
}
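The closing loop above waits on the watch with no upper bound, so a misbehaving recycler would hang the whole suite. A bounded variant, sketched with the testing, watch, and time packages this file already imports (waitForDeletion is a hypothetical helper, not part of this commit):

// waitForDeletion drains the watch until the PV's delete event arrives or the deadline passes.
func waitForDeletion(t *testing.T, w watch.Interface, timeout time.Duration) {
	deadline := time.After(timeout)
	for {
		select {
		case event := <-w.ResultChan():
			if event.Type == watch.Deleted {
				return
			}
		case <-deadline:
			t.Fatalf("timed out waiting for the persistent volume to be deleted")
		}
	}
}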