Implement localDriver
This commit is contained in:
parent d57d606275
commit aa87e4b57c
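
This change adds a "local" volume test driver (localDriver) to the e2e storage test suites. To support it, GetPersistentVolumeSource in the PreprovisionedPVTestDriver interface, and in every in-tree driver implementation, now returns an optional *v1.VolumeNodeAffinity alongside the *v1.PersistentVolumeSource, and the affinity is threaded through to the generated PersistentVolumeConfig. Two new utility files are added: test/e2e/storage/utils/host_exec.go (HostExec, a helper that runs commands on a node through a privileged hostexec pod) and test/e2e/storage/utils/local.go (LocalTestResourceManager, which creates and removes local volumes of various types on a node).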
@@ -39,6 +39,7 @@ import (
 	"fmt"
 	"math/rand"
 	"os/exec"
+	"strconv"
 	"strings"
 	"time"
 
@@ -120,7 +121,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
 	}
 }
 
-func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	ntr, ok := testResource.(*nfsTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource")
 	return &v1.PersistentVolumeSource{
@@ -129,7 +130,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test
 			Path: "/",
 			ReadOnly: readOnly,
 		},
-	}
+	}, nil
 }
 
 func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -276,7 +277,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testReso
 	}
 }
 
-func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gtr, ok := testResource.(*glusterTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource")
 
@@ -288,7 +289,7 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string
 			Path: "test_vol",
 			ReadOnly: readOnly,
 		},
-	}
+	}, nil
 }
 
 func (g *glusterFSDriver) CreateDriver() {
@@ -402,7 +403,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource
 	return &volSource
 }
 
-func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	itr, ok := testResource.(*iSCSITestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource")
 
@@ -417,7 +418,7 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
 	if fsType != "" {
 		pvSource.ISCSI.FSType = fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (i *iSCSIDriver) CreateDriver() {
@@ -519,7 +520,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
 	return &volSource
 }
 
-func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	f := r.driverInfo.Config.Framework
 	ns := f.Namespace
 
@@ -542,7 +543,7 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test
 	if fsType != "" {
 		pvSource.RBD.FSType = fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (r *rbdDriver) CreateDriver() {
@@ -637,7 +638,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, testResourc
 	}
 }
 
-func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	f := c.driverInfo.Config.Framework
 	ns := f.Namespace
 
@@ -654,7 +655,7 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t
 			},
 			ReadOnly: readOnly,
 		},
-	}
+	}, nil
 }
 
 func (c *cephFSDriver) CreateDriver() {
@@ -1026,7 +1027,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResourc
 	return &volSource
 }
 
-func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	ctr, ok := testResource.(*cinderTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource")
 
@@ -1039,7 +1040,7 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t
 	if fsType != "" {
 		pvSource.Cinder.FSType = fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1192,7 +1193,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource
 	return &volSource
 }
 
-func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gtr, ok := testResource.(*gcePdTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource")
 	pvSource := v1.PersistentVolumeSource{
@@ -1204,7 +1205,7 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
 	if fsType != "" {
 		pvSource.GCEPersistentDisk.FSType = fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1315,14 +1316,14 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour
 	return &volSource
 }
 
-func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	vtr, ok := testResource.(*vSphereTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource")
 
 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
 	if readOnly {
-		return nil
+		return nil, nil
 	}
 	pvSource := v1.PersistentVolumeSource{
 		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
@@ -1332,7 +1333,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
 	if fsType != "" {
 		pvSource.VsphereVolume.FSType = fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1440,7 +1441,7 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource
 	return &volSource
 }
 
-func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	atr, ok := testResource.(*azureTestResource)
 	Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource")
 
@@ -1456,7 +1457,7 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
 	if fsType != "" {
 		pvSource.AzureDisk.FSType = &fsType
 	}
-	return &pvSource
+	return &pvSource, nil
 }
 
 func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1558,7 +1559,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
 	return &volSource
 }
 
-func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
+func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	pvSource := v1.PersistentVolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: a.volumeName,
@@ -1607,3 +1608,184 @@ func (a *awsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource
 	framework.DeletePDWithRetry(a.volumeName)
 }
 */
+
+// local
+type localDriver struct {
+	driverInfo testsuites.DriverInfo
+	node *v1.Node
+	hostExec utils.HostExec
+	// volumeType represents local volume type we are testing, e.g. tmpfs,
+	// directory, block device.
+	volumeType utils.LocalVolumeType
+	ltrMgr utils.LocalTestResourceManager
+}
+
+var (
+	// capabilities
+	defaultLocalVolumeCapabilities = map[testsuites.Capability]bool{
+		testsuites.CapPersistence: true,
+		testsuites.CapFsGroup: true,
+		testsuites.CapBlock: false,
+		testsuites.CapExec: true,
+	}
+	localVolumeCapabitilies = map[utils.LocalVolumeType]map[testsuites.Capability]bool{
+		utils.LocalVolumeBlock: {
+			testsuites.CapPersistence: true,
+			testsuites.CapFsGroup: true,
+			testsuites.CapBlock: true,
+			testsuites.CapExec: true,
+		},
+	}
+	// fstype
+	defaultLocalVolumeSupportedFsTypes = sets.NewString("")
+	localVolumeSupportedFsTypes = map[utils.LocalVolumeType]sets.String{
+		utils.LocalVolumeBlock: sets.NewString(
+			"", // Default fsType
+			"ext2",
+			"ext3",
+			"ext4",
+			"xfs",
+		),
+	}
+	// max file size
+	defaultLocalVolumeMaxFileSize = testpatterns.FileSizeSmall
+	localVolumeMaxFileSizes = map[utils.LocalVolumeType]int64{}
+)
+
+var _ testsuites.TestDriver = &localDriver{}
+var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
+var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}
+
+func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver {
+	maxFileSize := defaultLocalVolumeMaxFileSize
+	if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
+		maxFileSize = maxFileSizeByVolType
+	}
+	supportedFsTypes := defaultLocalVolumeSupportedFsTypes
+	if supportedFsTypesByType, ok := localVolumeSupportedFsTypes[volumeType]; ok {
+		supportedFsTypes = supportedFsTypesByType
+	}
+	capabilities := defaultLocalVolumeCapabilities
+	if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok {
+		capabilities = capabilitiesByType
+	}
+	return func(config testsuites.TestConfig) testsuites.TestDriver {
+		hostExec := utils.NewHostExec(config.Framework)
+		// custom tag to distinguish from tests of other volume types
+		featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
+		// For GCE Local SSD volumes, we must run serially
+		if volumeType == utils.LocalVolumeGCELocalSSD {
+			featureTag += " [Serial]"
+		}
+		return &localDriver{
+			driverInfo: testsuites.DriverInfo{
+				Name: "local",
+				FeatureTag: featureTag,
+				MaxFileSize: maxFileSize,
+				SupportedFsType: supportedFsTypes,
+				Capabilities: capabilities,
+				Config: config,
+			},
+			hostExec: hostExec,
+			volumeType: volumeType,
+			ltrMgr: utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
+		}
+	}
+}
+
+func (l *localDriver) GetDriverInfo() *testsuites.DriverInfo {
+	return &l.driverInfo
+}
+
+func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+	if l.volumeType == utils.LocalVolumeGCELocalSSD {
+		ssdInterface := "scsi"
+		filesystemType := "fs"
+		ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
+		res, err := l.hostExec.IssueCommandWithResult(ssdCmd, l.node)
+		Expect(err).NotTo(HaveOccurred())
+		num, err := strconv.Atoi(strings.TrimSpace(res))
+		Expect(err).NotTo(HaveOccurred())
+		if num < 1 {
+			framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)
+		}
+	}
+}
+
+func (l *localDriver) CreateDriver() {
+	// choose a randome node to test against
+	l.node = l.randomNode()
+}
+
+func (l *localDriver) CleanupDriver() {
+	l.hostExec.Cleanup()
+}
+
+func (l *localDriver) randomNode() *v1.Node {
+	f := l.driverInfo.Config.Framework
+	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+	node := nodes.Items[rand.Intn(len(nodes.Items))]
+	return &node
+}
+
+func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+	switch volType {
+	case testpatterns.PreprovisionedPV:
+		node := l.node
+		// assign this to schedule pod on this node
+		l.driverInfo.Config.ClientNodeName = node.Name
+		return l.ltrMgr.Create(node, l.volumeType, nil)
+	default:
+		framework.Failf("Unsupported volType: %v is specified", volType)
+	}
+	return nil
+}
+
+func (l *localDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
+	ltr, ok := testResource.(*utils.LocalTestResource)
+	Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource")
+	switch volType {
+	case testpatterns.PreprovisionedPV:
+		l.ltrMgr.Remove(ltr)
+	default:
+		framework.Failf("Unsupported volType: %v is specified", volType)
+	}
+	return
+}
+
+func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
+	nodeKey := "kubernetes.io/hostname"
+	if node.Labels == nil {
+		framework.Failf("Node does not have labels")
+	}
+	nodeValue, found := node.Labels[nodeKey]
+	if !found {
+		framework.Failf("Node does not have required label %q", nodeKey)
+	}
+	return &v1.VolumeNodeAffinity{
+		Required: &v1.NodeSelector{
+			NodeSelectorTerms: []v1.NodeSelectorTerm{
+				{
+					MatchExpressions: []v1.NodeSelectorRequirement{
+						{
+							Key: nodeKey,
+							Operator: v1.NodeSelectorOpIn,
+							Values: []string{nodeValue},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	ltr, ok := testResource.(*utils.LocalTestResource)
+	Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource")
+	return &v1.PersistentVolumeSource{
+		Local: &v1.LocalVolumeSource{
+			Path: ltr.Path,
+			FSType: &fsType,
+		},
+	}, l.nodeAffinityForNode(ltr.Node)
+}
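
Illustrative sketch (not part of this commit): the lifecycle the new localDriver implements, written as if from within the drivers package so the unexported *localDriver can be used directly; the helper name exampleLocalDriverLifecycle is hypothetical.

func exampleLocalDriverLifecycle(config testsuites.TestConfig) {
	// Build a driver for plain-directory local volumes and assert back to the concrete type.
	driver := InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory)(config)
	l := driver.(*localDriver)

	l.CreateDriver()                                     // picks a random schedulable node
	res := l.CreateVolume(testpatterns.PreprovisionedPV) // creates the directory on that node

	// New two-value signature: the affinity pins the PV to the chosen node.
	pvSource, nodeAffinity := l.GetPersistentVolumeSource(false, "", res)
	_, _ = pvSource, nodeAffinity

	l.DeleteVolume(testpatterns.PreprovisionedPV, res)
	l.CleanupDriver()
}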
@@ -40,6 +40,14 @@ var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
 	drivers.InitVSphereDriver,
 	drivers.InitAzureDriver,
 	drivers.InitAwsDriver,
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLink),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryBindMounted),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLinkBindMounted),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeTmpfs),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeBlock),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeBlockFS),
+	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeGCELocalSSD),
 }
 
 // List of testSuites to be executed in below loop
@@ -157,9 +157,9 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
 	case testpatterns.PreprovisionedPV:
 		framework.Logf("Creating resource for pre-provisioned PV")
 		if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-			pvSource := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
+			pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
 			if pvSource != nil {
-				r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
+				r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false)
 			}
 			r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
 		}
@@ -231,12 +231,14 @@ func createVolumeSourceWithPVCPV(
 	f *framework.Framework,
 	name string,
 	pvSource *v1.PersistentVolumeSource,
+	volumeNodeAffinity *v1.VolumeNodeAffinity,
 	readOnly bool,
 ) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 	pvConfig := framework.PersistentVolumeConfig{
 		NamePrefix: fmt.Sprintf("%s-", name),
 		StorageClassName: f.Namespace.Name,
 		PVSource: *pvSource,
+		NodeAffinity: volumeNodeAffinity,
 	}
 	pvcConfig := framework.PersistentVolumeClaimConfig{
 		StorageClassName: &f.Namespace.Name,
@@ -58,10 +58,11 @@ type InlineVolumeTestDriver interface {
 // PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
 type PreprovisionedPVTestDriver interface {
 	PreprovisionedVolumeTestDriver
-	// GetPersistentVolumeSource returns a PersistentVolumeSource for pre-provisioned Persistent Volume.
+	// GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume.
 	// It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them.
 	// It will return nil, if the TestDriver doesn't support either of the parameters.
-	GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource
+	// Volume node affinity is optional, it will be nil for volumes which does not have volume node affinity.
+	GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
 }
 
 // DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV
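
Illustrative sketch (not part of this commit): how a caller of the updated interface threads the optional affinity into the PV config; pDriver, fsType and testResource are assumed to exist in the surrounding test.

pvSource, nodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, testResource)
if pvSource != nil {
	pvConfig := framework.PersistentVolumeConfig{
		PVSource:     *pvSource,
		NodeAffinity: nodeAffinity, // nil for drivers that place no node constraint
	}
	_ = pvConfig
}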
@@ -162,8 +162,9 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
 	volType := pattern.VolType
 
 	var (
 		scName string
 		pvSource *v1.PersistentVolumeSource
+		volumeNodeAffinity *v1.VolumeNodeAffinity
 	)
 
 	// Create volume for pre-provisioned volume tests
@@ -177,12 +178,12 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
 			scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
 		}
 		if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-			pvSource = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
+			pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
 			if pvSource == nil {
 				framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
 			}
 
-			sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
+			sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity)
 			s.sc = sc
 			s.pv = framework.MakePersistentVolume(pvConfig)
 			s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
@@ -373,7 +374,7 @@ func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
 }
 
 func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
-	volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
+	volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
 	framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
 	// StorageClass
 	scConfig := &storagev1.StorageClass{
@@ -386,6 +387,7 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
 	// PV
 	pvConfig := framework.PersistentVolumeConfig{
 		PVSource: pvSource,
+		NodeAffinity: volumeNodeAffinity,
 		NamePrefix: pvNamePrefix,
 		StorageClassName: scName,
 		VolumeMode: &volMode,
@@ -10,6 +10,8 @@ go_library(
     srcs = [
         "deployment.go",
         "framework.go",
+        "host_exec.go",
+        "local.go",
         "utils.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
@@ -20,6 +22,7 @@ go_library(
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+       "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
test/e2e/storage/utils/host_exec.go (new file, 124 lines)
@@ -0,0 +1,124 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+	"fmt"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+// HostExec represents interface we require to execute commands on remote host.
+type HostExec interface {
+	IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
+	IssueCommand(cmd string, node *v1.Node) error
+	Cleanup()
+}
+
+// hostExecutor implements HostExec
+type hostExecutor struct {
+	*framework.Framework
+	nodeExecPods map[string]*v1.Pod
+}
+
+// NewHostExec returns a HostExec
+func NewHostExec(framework *framework.Framework) HostExec {
+	return &hostExecutor{
+		Framework: framework,
+		nodeExecPods: make(map[string]*v1.Pod),
+	}
+}
+
+// launchNodeExecPod launches a hostexec pod for local PV and waits
+// until it's Running.
+func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
+	f := h.Framework
+	cs := f.ClientSet
+	ns := f.Namespace
+	hostExecPod := framework.NewHostExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node))
+	hostExecPod.Spec.NodeName = node
+	hostExecPod.Spec.Volumes = []v1.Volume{
+		{
+			// Required to enter into host mount namespace via nsenter.
+			Name: "rootfs",
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{
+					Path: "/",
+				},
+			},
+		},
+	}
+	hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
+		{
+			Name: "rootfs",
+			MountPath: "/rootfs",
+			ReadOnly: true,
+		},
+	}
+	hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
+		Privileged: func(privileged bool) *bool {
+			return &privileged
+		}(true),
+	}
+	pod, err := cs.CoreV1().Pods(ns.Name).Create(hostExecPod)
+	framework.ExpectNoError(err)
+	err = framework.WaitForPodRunningInNamespace(cs, pod)
+	framework.ExpectNoError(err)
+	return pod
+}
+
+// IssueCommandWithResult issues command on given node and returns stdout.
+func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
+	pod, ok := h.nodeExecPods[node.Name]
+	if !ok {
+		pod = h.launchNodeExecPod(node.Name)
+		if pod == nil {
+			return "", fmt.Errorf("failed to create hostexec pod for node %q", node)
+		}
+		h.nodeExecPods[node.Name] = pod
+	}
+	args := []string{
+		"exec",
+		fmt.Sprintf("--namespace=%v", pod.Namespace),
+		pod.Name,
+		"--",
+		"nsenter",
+		"--mount=/rootfs/proc/1/ns/mnt",
+		"--",
+		"sh",
+		"-c",
+		cmd,
+	}
+	return framework.RunKubectl(args...)
+}
+
+// IssueCommand works like IssueCommandWithResult, but discards result.
+func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
+	_, err := h.IssueCommandWithResult(cmd, node)
+	return err
+}
+
+// Cleanup cleanup resources it created during test.
+// Note that in most cases it is not necessary to call this because we create
+// pods under test namespace which will be destroyed in teardown phase.
+func (h *hostExecutor) Cleanup() {
+	for _, pod := range h.nodeExecPods {
+		framework.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
+	}
+	h.nodeExecPods = make(map[string]*v1.Pod)
+}
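
Illustrative sketch (not part of this commit): minimal use of HostExec from a test, assuming f is an existing *framework.Framework and node is a *v1.Node in the cluster.

hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() // optional; the hostexec pods live in the test namespace and are torn down with it

out, err := hostExec.IssueCommandWithResult("uname -a", node)
framework.ExpectNoError(err)
framework.Logf("node kernel: %s", out)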
test/e2e/storage/utils/local.go (new file, 345 lines)
@@ -0,0 +1,345 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+/*
+ * Various local test resource implementations.
+ */
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+// LocalVolumeType represents type of local volume, e.g. tmpfs, directory,
+// block, etc.
+type LocalVolumeType string
+
+const (
+	// A simple directory as local volume
+	LocalVolumeDirectory LocalVolumeType = "dir"
+	// Like LocalVolumeDirectory but it's a symbolic link to directory
+	LocalVolumeDirectoryLink LocalVolumeType = "dir-link"
+	// Like LocalVolumeDirectory but bind mounted
+	LocalVolumeDirectoryBindMounted LocalVolumeType = "dir-bindmounted"
+	// Like LocalVolumeDirectory but it's a symbolic link to self bind mounted directory
+	// Note that bind mounting at symbolic link actually mounts at directory it
+	// links to
+	LocalVolumeDirectoryLinkBindMounted LocalVolumeType = "dir-link-bindmounted"
+	// Use temporary filesystem as local volume
+	LocalVolumeTmpfs LocalVolumeType = "tmpfs"
+	// Block device, creates a local file, and maps it as a block device
+	LocalVolumeBlock LocalVolumeType = "block"
+	// Filesystem backed by a block device
+	LocalVolumeBlockFS LocalVolumeType = "blockfs"
+	// Use GCE Local SSD as local volume, this is a filesystem
+	LocalVolumeGCELocalSSD LocalVolumeType = "gce-localssd-scsi-fs"
+)
+
+// LocalTestResource represents test resource of a local volume.
+type LocalTestResource struct {
+	VolumeType LocalVolumeType
+	Node *v1.Node
+	// Volume path, path to filesystem or block device on the node
+	Path string
+	// If volume is backed by a loop device, we create loop device storage file
+	// under this directory.
+	loopDir string
+}
+
+// LocalTestResourceManager represents interface to create/destroy local test resources on node
+type LocalTestResourceManager interface {
+	Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
+	Remove(ltr *LocalTestResource)
+}
+
+// ltrMgr implements LocalTestResourceManager
+type ltrMgr struct {
+	prefix string
+	hostExec HostExec
+	// hostBase represents a writable directory on the host under which we
+	// create test directories
+	hostBase string
+}
+
+// NewLocalResourceManager returns a instance of LocalTestResourceManager
+func NewLocalResourceManager(prefix string, hostExec HostExec, hostBase string) LocalTestResourceManager {
+	return &ltrMgr{
+		prefix: prefix,
+		hostExec: hostExec,
+		hostBase: hostBase,
+	}
+}
+
+// getTestDir returns a test dir under `hostBase` directory with randome name.
+func (l *ltrMgr) getTestDir() string {
+	testDirName := fmt.Sprintf("%s-%s", l.prefix, string(uuid.NewUUID()))
+	return filepath.Join(l.hostBase, testDirName)
+}
+
+func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	hostDir := l.getTestDir()
+	By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
+	err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
+	By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
+	err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Removing the test directory")
+	err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+// createAndSetupLoopDevice creates an empty file and associates a loop devie with it.
+func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
+	By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
+	mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
+	count := size / 4096
+	// xfs requires at least 4096 blocks
+	if count < 4096 {
+		count = 4096
+	}
+	ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
+	losetupCmd := fmt.Sprintf("sudo losetup -f %s/file", dir)
+	err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+// findLoopDevice finds loop device path by its associated storage directory.
+func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
+	cmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
+	loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return strings.TrimSpace(loopDevResult)
+}
+
+func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	loopDir := l.getTestDir()
+	l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
+	loopDev := l.findLoopDevice(loopDir, node)
+	return &LocalTestResource{
+		Node: node,
+		Path: loopDev,
+		loopDir: loopDir,
+	}
+}
+
+// teardownLoopDevice tears down loop device by its associated storage directory.
+func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
+	loopDev := l.findLoopDevice(dir, node)
+	By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
+	losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
+	err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return
+}
+
+func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
+	l.teardownLoopDevice(ltr.loopDir, ltr.Node)
+	By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
+	removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	ltr := l.setupLocalVolumeBlock(node, parameters)
+	loopDev := ltr.Path
+	loopDir := ltr.loopDir
+	// Format and mount at loopDir and give others rwx for read/write testing
+	cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
+	err := l.hostExec.IssueCommand(cmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: loopDir,
+		loopDir: loopDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
+	umountCmd := fmt.Sprintf("sudo umount %s", ltr.Path)
+	err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+	l.cleanupLocalVolumeBlock(ltr)
+}
+
+func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	hostDir := l.getTestDir()
+	mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
+	err := l.hostExec.IssueCommand(mkdirCmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
+	By("Removing the test directory")
+	removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	hostDir := l.getTestDir()
+	hostDirBackend := hostDir + "-backend"
+	cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
+	err := l.hostExec.IssueCommand(cmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
+	By("Removing the test directory")
+	hostDir := ltr.Path
+	hostDirBackend := hostDir + "-backend"
+	removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	hostDir := l.getTestDir()
+	cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
+	err := l.hostExec.IssueCommand(cmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
+	By("Removing the test directory")
+	hostDir := ltr.Path
+	removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	hostDir := l.getTestDir()
+	hostDirBackend := hostDir + "-backend"
+	cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
+	err := l.hostExec.IssueCommand(cmd, node)
+	Expect(err).NotTo(HaveOccurred())
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
+	By("Removing the test directory")
+	hostDir := ltr.Path
+	hostDirBackend := hostDir + "-backend"
+	removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
+	res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
+	Expect(err).NotTo(HaveOccurred())
+	dirName := strings.Fields(res)[0]
+	hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
+	return &LocalTestResource{
+		Node: node,
+		Path: hostDir,
+	}
+}
+
+func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
+	// This filesystem is attached in cluster initialization, we clean all files to make it reusable.
+	removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
+	err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
+	var ltr *LocalTestResource
+	switch volumeType {
+	case LocalVolumeDirectory:
+		ltr = l.setupLocalVolumeDirectory(node, parameters)
+	case LocalVolumeDirectoryLink:
+		ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
+	case LocalVolumeDirectoryBindMounted:
+		ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
+	case LocalVolumeDirectoryLinkBindMounted:
+		ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
+	case LocalVolumeTmpfs:
+		ltr = l.setupLocalVolumeTmpfs(node, parameters)
+	case LocalVolumeBlock:
+		ltr = l.setupLocalVolumeBlock(node, parameters)
+	case LocalVolumeBlockFS:
+		ltr = l.setupLocalVolumeBlockFS(node, parameters)
+	case LocalVolumeGCELocalSSD:
+		ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
+	default:
+		framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
+		return nil
+	}
+	if ltr == nil {
+		framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
+	}
+	ltr.VolumeType = volumeType
+	return ltr
+}
+
+func (l *ltrMgr) Remove(ltr *LocalTestResource) {
+	switch ltr.VolumeType {
+	case LocalVolumeDirectory:
+		l.cleanupLocalVolumeDirectory(ltr)
+	case LocalVolumeDirectoryLink:
+		l.cleanupLocalVolumeDirectoryLink(ltr)
+	case LocalVolumeDirectoryBindMounted:
+		l.cleanupLocalVolumeDirectoryBindMounted(ltr)
+	case LocalVolumeDirectoryLinkBindMounted:
+		l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
+	case LocalVolumeTmpfs:
+		l.cleanupLocalVolumeTmpfs(ltr)
+	case LocalVolumeBlock:
+		l.cleanupLocalVolumeBlock(ltr)
+	case LocalVolumeBlockFS:
+		l.cleanupLocalVolumeBlockFS(ltr)
+	case LocalVolumeGCELocalSSD:
+		l.cleanupLocalVolumeGCELocalSSD(ltr)
+	default:
+		framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
+	}
+	return
+}
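
Illustrative sketch (not part of this commit): driving LocalTestResourceManager directly, assuming hostExec was created with utils.NewHostExec and node is a ready *v1.Node.

ltrMgr := utils.NewLocalResourceManager("example", hostExec, "/tmp")
ltr := ltrMgr.Create(node, utils.LocalVolumeTmpfs, nil)
// ltr.Path now points at a tmpfs mount on the node and can back a local PersistentVolume.
ltrMgr.Remove(ltr)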