Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-11 22:20:18 +00:00)
removal of glusterfs in-tree driver code from the source
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
@@ -46,7 +46,6 @@ import (
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
@@ -219,143 +218,6 @@ func (v *nfsVolume) DeleteVolume() {
	cleanUpVolumeServer(v.f, v.serverPod)
}

// Gluster
type glusterFSDriver struct {
	driverInfo storageframework.DriverInfo
}

type glusterVolume struct {
	prefix    string
	serverPod *v1.Pod
	f         *framework.Framework
}

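// Compile-time assertions that glusterFSDriver satisfies the storage framework test driver interfaces.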
var _ storageframework.TestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.InlineVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &glusterFSDriver{}

// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
func InitGlusterFSDriver() storageframework.TestDriver {
	return &glusterFSDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "gluster",
			InTreePluginName: "kubernetes.io/glusterfs",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
			),
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence: true,
				storageframework.CapExec:        true,
				storageframework.CapRWX:         true,
				storageframework.CapMultiPODs:   true,
			},
		},
	}
}

func (g *glusterFSDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &g.driverInfo
}

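// SkipUnsupportedTest limits these tests to node OS distros on which the GlusterFS client tooling is expected to be present.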
func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
}

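// GetVolumeSource builds an inline Glusterfs volume source that points pods at the test server's endpoints object.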
func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
	gv, ok := e2evolume.(*glusterVolume)
	if !ok {
		framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume)
	}

	name := gv.prefix + "-server"
	return &v1.VolumeSource{
		Glusterfs: &v1.GlusterfsVolumeSource{
			EndpointsName: name,
			// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
			Path:     "test_vol",
			ReadOnly: readOnly,
		},
	}
}

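// GetPersistentVolumeSource builds the equivalent persistent volume source; it returns nil node affinity.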
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
	gv, ok := e2evolume.(*glusterVolume)
	if !ok {
		framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume)
	}

	name := gv.prefix + "-server"
	return &v1.PersistentVolumeSource{
		Glusterfs: &v1.GlusterfsPersistentVolumeSource{
			EndpointsName: name,
			// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
			Path:     "test_vol",
			ReadOnly: readOnly,
		},
	}, nil
}

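// PrepareTest returns the per-test configuration for this driver; the returned cleanup function is a no-op.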
func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
	return &storageframework.PerTestConfig{
		Driver:    g,
		Prefix:    "gluster",
		Framework: f,
	}, func() {}
}

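// CreateVolume starts a GlusterFS server pod in the test namespace and returns a handle from which volume sources are built.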
func (g *glusterFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	f := config.Framework
	cs := f.ClientSet
	ns := f.Namespace

	c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name)
	config.ServerConfig = &c
	return &glusterVolume{
		prefix:    config.Prefix,
		serverPod: serverPod,
		f:         f,
	}
}

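// DeleteVolume removes the endpoints, service, and server pod created for the test volume, tolerating objects that are already gone.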
func (v *glusterVolume) DeleteVolume() {
	f := v.f
	cs := f.ClientSet
	ns := f.Namespace

	name := v.prefix + "-server"

	nameSpaceName := fmt.Sprintf("%s/%s", ns.Name, name)

	framework.Logf("Deleting Gluster endpoints %s...", nameSpaceName)
	err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Gluster deleting endpoint %s failed: %v", nameSpaceName, err)
		}
		framework.Logf("Gluster endpoints %q not found, assuming deleted", nameSpaceName)
	}

	framework.Logf("Deleting Gluster service %s...", nameSpaceName)
	err = cs.CoreV1().Services(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Gluster deleting service %s failed: %v", nameSpaceName, err)
		}
		framework.Logf("Gluster service %q not found, assuming deleted", nameSpaceName)
	}

	framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
	err = e2epod.DeletePodWithWait(cs, v.serverPod)
	if err != nil {
		framework.Failf("Gluster server pod delete failed: %v", err)
	}
}

// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
type iSCSIDriver struct {