Merge pull request #13650 from markturansky/prov_creater

Auto commit by PR queue bot
k8s-merge-robot committed 2015-09-20 16:57:36 -07:00
commit 568c0331b0
5 changed files with 167 additions and 34 deletions

File: the hostPath volume plugin

@@ -22,7 +22,9 @@ import (
    "regexp"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util"
    "k8s.io/kubernetes/pkg/volume"
)
@@ -36,6 +38,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin
            host:            nil,
            newRecyclerFunc: newRecycler,
            newDeleterFunc:  newDeleter,
            newCreaterFunc:  newCreater,
            config:          volumeConfig,
        },
    }
@@ -46,6 +49,7 @@ func ProbeRecyclableVolumePlugins(recyclerFunc func(spec *volume.Spec, host volu
        &hostPathPlugin{
            host:            nil,
            newRecyclerFunc: recyclerFunc,
            newCreaterFunc:  newCreater,
            config:          volumeConfig,
        },
    }
@@ -53,17 +57,18 @@ func ProbeRecyclableVolumePlugins(recyclerFunc func(spec *volume.Spec, host volu
type hostPathPlugin struct {
    host volume.VolumeHost
    // decouple creating recyclers by deferring to a function. Allows for easier testing.
    // decouple creating Recyclers/Deleters/Creaters by deferring to a function. Allows for easier testing.
    newRecyclerFunc func(spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
    // decouple creating deleters by deferring to a function. Allows for easier testing.
    newDeleterFunc func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
    config volume.VolumeConfig
    newDeleterFunc  func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
    newCreaterFunc  func(options volume.VolumeOptions, host volume.VolumeHost) (volume.Creater, error)
    config          volume.VolumeConfig
}
var _ volume.VolumePlugin = &hostPathPlugin{}
var _ volume.PersistentVolumePlugin = &hostPathPlugin{}
var _ volume.RecyclableVolumePlugin = &hostPathPlugin{}
var _ volume.DeletableVolumePlugin = &hostPathPlugin{}
var _ volume.CreatableVolumePlugin = &hostPathPlugin{}
const (
    hostPathPluginName = "kubernetes.io/host-path"
@@ -114,6 +119,13 @@ func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, err
    return plugin.newDeleterFunc(spec, plugin.host)
}
func (plugin *hostPathPlugin) NewCreater(options volume.VolumeOptions) (volume.Creater, error) {
    if len(options.AccessModes) == 0 {
        options.AccessModes = plugin.GetAccessModes()
    }
    return plugin.newCreaterFunc(options, plugin.host)
}
func newRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
    if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
        return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
@@ -134,6 +146,10 @@ func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, erro
    return &hostPathDeleter{spec.Name(), spec.PersistentVolume.Spec.HostPath.Path, host}, nil
}
func newCreater(options volume.VolumeOptions, host volume.VolumeHost) (volume.Creater, error) {
    return &hostPathCreater{options: options, host: host}, nil
}
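Because creation is funneled through the newCreaterFunc field, a test can swap in a stub without touching the host filesystem. A minimal sketch of that injection; the fakeCreater type and newFakeCreater function are hypothetical, not part of this change:

// fakeCreater satisfies volume.Creater but returns a canned PersistentVolume.
type fakeCreater struct {
    pv *api.PersistentVolume
}

func (f *fakeCreater) Create() (*api.PersistentVolume, error) {
    return f.pv, nil
}

func newFakeCreater(options volume.VolumeOptions, host volume.VolumeHost) (volume.Creater, error) {
    return &fakeCreater{pv: &api.PersistentVolume{}}, nil
}

// A test could then wire the stub in place of newCreater:
//   plugin := &hostPathPlugin{newCreaterFunc: newFakeCreater, config: volume.VolumeConfig{}}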
// HostPath volumes represent a bare host file or directory mount.
// The directory at the specified path will be directly exposed to the container.
type hostPath struct {
@@ -185,8 +201,8 @@ func (c *hostPathCleaner) TearDownAt(dir string) error {
    return fmt.Errorf("TearDownAt() does not make sense for host paths")
}
// hostPathRecycler scrubs a hostPath volume by running "rm -rf" on the volume in a pod
// This recycler only works on a single host cluster and is for testing purposes only.
// hostPathRecycler implements a dynamic provisioning Recycler for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster
type hostPathRecycler struct {
    name string
    path string
@@ -215,6 +231,44 @@ func (r *hostPathRecycler) Recycle() error {
    return volume.RecycleVolumeByWatchingPodUntilCompletion(pod, r.host.GetKubeClient())
}
// hostPathCreater implements a dynamic provisioning Creater for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster.
type hostPathCreater struct {
    host    volume.VolumeHost
    options volume.VolumeOptions
}

// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
    fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
    err := os.MkdirAll(fullpath, 0750)
    if err != nil {
        return nil, err
    }
    return &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{
            GenerateName: "pv-hostpath-",
            Labels: map[string]string{
                "createdby": "hostpath dynamic provisioner",
            },
        },
        Spec: api.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   r.options.AccessModes,
            Capacity: api.ResourceList{
                api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
            },
            PersistentVolumeSource: api.PersistentVolumeSource{
                HostPath: &api.HostPathVolumeSource{
                    Path: fullpath,
                },
            },
        },
    }, nil
}
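To trace the provisioning path end to end, the creater can be driven directly. A minimal sketch, assuming a configured volume.VolumeHost named host; error handling is condensed to log.Fatalf:

options := volume.VolumeOptions{
    CapacityMB:                    100,
    AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
    PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
creater, err := newCreater(options, host)
if err != nil {
    log.Fatalf("creater: %v", err)
}
pv, err := creater.Create()
if err != nil {
    log.Fatalf("create: %v", err)
}
// pv.Spec.HostPath.Path now names a fresh /tmp/hostpath_pv/<uuid> directory.
fmt.Println(pv.Spec.HostPath.Path)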
// hostPathDeleter deletes a hostPath PV from the cluster.
// This deleter only works on a single host cluster and is for testing purposes only.
type hostPathDeleter struct {

File: the hostPath plugin tests

@@ -22,6 +22,7 @@ import (
    "testing"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/client/unversioned/testclient"
    "k8s.io/kubernetes/pkg/types"
@@ -67,7 +68,7 @@ func TestGetAccessModes(t *testing.T) {
func TestRecycler(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    pluginHost := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler, nil, volume.VolumeConfig{}}}, pluginHost)
    plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler, nil, nil, volume.VolumeConfig{}}}, pluginHost)
    spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}}
    plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
@@ -143,6 +144,44 @@ func TestDeleterTempDir(t *testing.T) {
    }
}
func TestCreater(t *testing.T) {
    tempPath := "/tmp/hostpath/"
    defer os.RemoveAll(tempPath)
    err := os.MkdirAll(tempPath, 0750)
    if err != nil {
        t.Fatalf("Failed to create temp directory: %v", err)
    }
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
    spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
    plug, err := plugMgr.FindCreatablePluginBySpec(spec)
    if err != nil {
        t.Errorf("Can't find the plugin by spec: %v", err)
    }
    creater, err := plug.NewCreater(volume.VolumeOptions{CapacityMB: 100, PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete})
    if err != nil {
        t.Errorf("Failed to make a new Creater: %v", err)
    }
    pv, err := creater.Create()
    if err != nil {
        t.Errorf("Unexpected error creating volume: %v", err)
    }
    if pv.Spec.HostPath.Path == "" {
        t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv)
    }
    expectedCapacity := resource.NewQuantity(100*1024*1024, resource.BinarySI)
    actualCapacity := pv.Spec.Capacity[api.ResourceStorage]
    expectedAmt := expectedCapacity.Value()
    actualAmt := actualCapacity.Value()
    if expectedAmt != actualAmt {
        t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt)
    }
    if pv.Spec.PersistentVolumeReclaimPolicy != api.PersistentVolumeReclaimDelete {
        t.Errorf("Expected reclaim policy %+v but got %+v", api.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
    }
    os.RemoveAll(pv.Spec.HostPath.Path)
}
func TestPlugin(t *testing.T) {
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))

File: the volume plugin interfaces and plugin manager

@@ -33,14 +33,21 @@ import (
)
// VolumeOptions contains option information about a volume.
//
// Currently, this struct contains only a single field for the
// rootcontext of the volume. This is a temporary measure in order
// to set the rootContext of tmpfs mounts correctly; it will be replaced
// and expanded on by future SecurityContext work.
type VolumeOptions struct {
    // The rootcontext to use when performing mounts for a volume.
    // This is a temporary measure in order to set the rootContext of tmpfs mounts correctly;
    // it will be replaced and expanded on by future SecurityContext work.
    RootContext string

    // The attributes below are required by volume.Creater.
    // TODO: refactor them into a CreaterVolumeOptions struct?

    // CapacityMB is the size, in megabytes, of a volume.
    CapacityMB int
    // AccessModes of a volume.
    AccessModes []api.PersistentVolumeAccessMode
    // Reclamation policy for a persistent volume.
    PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy
}
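CapacityMB feeds the PV's storage quantity via resource.MustParse, so 100 becomes "100Mi", i.e. 104857600 bytes. A self-contained sketch of that round trip:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    // Mirrors how a Creater turns CapacityMB into a capacity quantity.
    capacityMB := 100
    qty := resource.MustParse(fmt.Sprintf("%dMi", capacityMB))
    fmt.Println(qty.Value()) // 104857600, i.e. 100 * 1024 * 1024 bytes
}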
// VolumePlugin is an interface to volume plugins that can be used on a
@@ -99,6 +106,14 @@ type DeletableVolumePlugin interface {
    NewDeleter(spec *Spec) (Deleter, error)
}
// CreatableVolumePlugin is an extended interface of VolumePlugin and is used to create volumes for the cluster.
type CreatableVolumePlugin interface {
    VolumePlugin
    // NewCreater creates a new volume.Creater which knows how to create PersistentVolumes in accordance with
    // the plugin's underlying storage provider.
    NewCreater(options VolumeOptions) (Creater, error)
}
// VolumeHost is an interface that plugins can use to access the kubelet.
type VolumeHost interface {
    // GetPluginDir returns the absolute path to a directory under which
@@ -345,6 +360,19 @@ func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolum
    return nil, fmt.Errorf("no deletable volume plugin matched")
}
// FindCreatablePluginBySpec fetches a persistent volume plugin by spec. If no plugin
// is found, returns an error.
func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (CreatableVolumePlugin, error) {
    volumePlugin, err := pm.FindPluginBySpec(spec)
    if err != nil {
        return nil, err
    }
    if creatableVolumePlugin, ok := volumePlugin.(CreatableVolumePlugin); ok {
        return creatableVolumePlugin, nil
    }
    return nil, fmt.Errorf("no creatable volume plugin matched")
}
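Callers are expected to go through the plugin manager rather than a concrete plugin: find the plugin matching a spec, upgrade it to CreatableVolumePlugin via the type assertion above, then provision. A hedged sketch of that flow; the provisionFor helper and its option values are illustrative, not part of this change:

func provisionFor(pm *VolumePluginMgr, spec *Spec) (*api.PersistentVolume, error) {
    plug, err := pm.FindCreatablePluginBySpec(spec)
    if err != nil {
        return nil, err // no matching plugin, or the match is not creatable
    }
    creater, err := plug.NewCreater(VolumeOptions{
        CapacityMB:                    100,
        PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
    })
    if err != nil {
        return nil, err
    }
    // Create blocks until the backing resource and its PersistentVolume exist.
    return creater.Create()
}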
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler pod. By default, a recycler pod simply runs
// "rm -rf" on a volume and tests for emptiness. Most attributes of the template will be correct for most
// plugin implementations. The following attributes can be overridden per plugin via configuration:

File: the volume interfaces (Recycler, Creater, Deleter)

@@ -18,6 +18,7 @@ package volume
import (
    "io/ioutil"
    "k8s.io/kubernetes/pkg/api"
    "os"
    "path"
)
@@ -65,6 +66,12 @@ type Recycler interface {
    Recycle() error
}
// Create adds a new resource in the storage provider and creates a PersistentVolume for the new resource.
// Calls to Create should block until complete.
type Creater interface {
    Create() (*api.PersistentVolume, error)
}
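Since Creater is a single-method interface, a plain function adapter satisfies it, which keeps provisioners easy to stub in prototypes and tests. A small sketch; the createrFunc type is illustrative, not part of this change:

// createrFunc adapts an ordinary function to the Creater interface.
type createrFunc func() (*api.PersistentVolume, error)

func (f createrFunc) Create() (*api.PersistentVolume, error) {
    return f()
}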
// Delete removes the resource from the underlying storage provider. Calls to this method should block until
// the deletion is complete. Any error returned indicates the volume has failed to be reclaimed.
// A nil return indicates success.

File: the persistent volume integration tests

@@ -42,20 +42,21 @@ func TestPersistentVolumeClaimBinder(t *testing.T) {
    defer s.Close()
    deleteAllEtcdKeys()
    client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second)
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
    binder.Run()
    defer binder.Stop()
    for _, volume := range createTestVolumes() {
        _, err := client.PersistentVolumes().Create(volume)
        _, err := testClient.PersistentVolumes().Create(volume)
        if err != nil {
            t.Fatalf("Unexpected error: %v", err)
        }
    }
    volumes, err := client.PersistentVolumes().List(labels.Everything(), fields.Everything())
    volumes, err := testClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
@@ -64,13 +65,13 @@ func TestPersistentVolumeClaimBinder(t *testing.T) {
    }
    for _, claim := range createTestClaims() {
        _, err := client.PersistentVolumeClaims(api.NamespaceDefault).Create(claim)
        _, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(claim)
        if err != nil {
            t.Fatalf("Unexpected error: %v", err)
        }
    }
    claims, err := client.PersistentVolumeClaims(api.NamespaceDefault).List(labels.Everything(), fields.Everything())
    claims, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).List(labels.Everything(), fields.Everything())
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
@@ -79,7 +80,7 @@ func TestPersistentVolumeClaimBinder(t *testing.T) {
    }
    // the binder will eventually catch up and set status on Claims
    watch, err := client.PersistentVolumeClaims(api.NamespaceDefault).Watch(labels.Everything(), fields.Everything(), "0")
    watch, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Watch(labels.Everything(), fields.Everything(), "0")
    if err != nil {
        t.Fatalf("Couldn't subscribe to PersistentVolumeClaims: %v", err)
    }
@@ -99,7 +100,7 @@ func TestPersistentVolumeClaimBinder(t *testing.T) {
    }
    for _, claim := range createTestClaims() {
        claim, err := client.PersistentVolumeClaims(api.NamespaceDefault).Get(claim.Name)
        claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Get(claim.Name)
        if err != nil {
            t.Fatalf("Unexpected error: %v", err)
        }
@@ -213,13 +214,15 @@ func TestPersistentVolumeRecycler(t *testing.T) {
    defer s.Close()
    deleteAllEtcdKeys()
    client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second)
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
    binder.Run()
    defer binder.Stop()
    recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(client, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
    recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
    recycler.Run()
    defer recycler.Stop()
@@ -242,17 +245,17 @@ func TestPersistentVolumeRecycler(t *testing.T) {
        },
    }
    watch, _ := client.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
    watch, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
    defer watch.Stop()
    _, _ = client.PersistentVolumes().Create(pv)
    _, _ = client.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
    _, _ = testClient.PersistentVolumes().Create(pv)
    _, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
    // wait until the binder pairs the volume and claim
    waitForPersistentVolumePhase(watch, api.VolumeBound)
    // deleting a claim releases the volume, after which it can be recycled
    if err := client.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
    if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
        t.Errorf("error deleting claim %s", pvc.Name)
    }
@@ -275,13 +278,15 @@ func TestPersistentVolumeDeleter(t *testing.T) {
    defer s.Close()
    deleteAllEtcdKeys()
    client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second)
    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
    binder.Run()
    defer binder.Stop()
    recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(client, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
    recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
    recycler.Run()
    defer recycler.Stop()
@@ -304,17 +309,17 @@ func TestPersistentVolumeDeleter(t *testing.T) {
        },
    }
    w, _ := client.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
    w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
    defer w.Stop()
    _, _ = client.PersistentVolumes().Create(pv)
    _, _ = client.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
    _, _ = testClient.PersistentVolumes().Create(pv)
    _, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
    // wait until the binder pairs the volume and claim
    waitForPersistentVolumePhase(w, api.VolumeBound)
    // deleting a claim releases the volume, after which it can be recycled
    if err := client.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
    if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
        t.Errorf("error deleting claim %s", pvc.Name)
    }