diff --git a/cluster/gce/gci/mounter/Dockerfile b/cluster/gce/gci/mounter/Dockerfile
index f3d4f2c8c7f..1dfabf53ae2 100644
--- a/cluster/gce/gci/mounter/Dockerfile
+++ b/cluster/gce/gci/mounter/Dockerfile
@@ -14,6 +14,6 @@
 
 FROM ubuntu:xenial
 
-RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12 glusterfs-client=3.7.6-1ubuntu1
+RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12
 
 ENTRYPOINT ["/bin/mount"]
diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go
index d65acb097bf..693282d06e8 100644
--- a/cmd/kube-controller-manager/app/plugins.go
+++ b/cmd/kube-controller-manager/app/plugins.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/kubernetes/pkg/volume/csi"
 	"k8s.io/kubernetes/pkg/volume/fc"
 	"k8s.io/kubernetes/pkg/volume/flexvolume"
-	"k8s.io/kubernetes/pkg/volume/glusterfs"
 	"k8s.io/kubernetes/pkg/volume/hostpath"
 	"k8s.io/kubernetes/pkg/volume/iscsi"
 	"k8s.io/kubernetes/pkg/volume/local"
@@ -78,7 +77,6 @@ func ProbeExpandableVolumePlugins(config persistentvolumeconfig.VolumeConfigurat
 	if err != nil {
 		return allPlugins, err
 	}
-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
 	return allPlugins, nil
 }
@@ -118,7 +116,6 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config persiste
 		klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
 	}
 	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
 
 	var err error
 	allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go
index e05a21ef14d..4ec7a295a85 100644
--- a/cmd/kubelet/app/plugins.go
+++ b/cmd/kubelet/app/plugins.go
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/pkg/volume/fc"
 	"k8s.io/kubernetes/pkg/volume/flexvolume"
 	"k8s.io/kubernetes/pkg/volume/git_repo"
-	"k8s.io/kubernetes/pkg/volume/glusterfs"
 	"k8s.io/kubernetes/pkg/volume/hostpath"
 	"k8s.io/kubernetes/pkg/volume/iscsi"
 	"k8s.io/kubernetes/pkg/volume/local"
@@ -64,7 +63,6 @@ func ProbeVolumePlugins(featureGate featuregate.FeatureGate) ([]volume.VolumePlu
 	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
 	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go
index ffc625b331e..173695581cb 100644
--- a/pkg/kubemark/hollow_kubelet.go
+++ b/pkg/kubemark/hollow_kubelet.go
@@ -45,7 +45,6 @@ import (
 	"k8s.io/kubernetes/pkg/volume/emptydir"
 	"k8s.io/kubernetes/pkg/volume/fc"
 	"k8s.io/kubernetes/pkg/volume/git_repo"
-	"k8s.io/kubernetes/pkg/volume/glusterfs"
 	"k8s.io/kubernetes/pkg/volume/hostpath"
 	"k8s.io/kubernetes/pkg/volume/iscsi"
 	"k8s.io/kubernetes/pkg/volume/local"
@@ -73,7 +72,6 @@ func volumePlugins() []volume.VolumePlugin {
 	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
 	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go deleted file mode 100644 index 10e571a8b5d..00000000000 --- a/pkg/volume/glusterfs/glusterfs.go +++ /dev/null @@ -1,1256 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package glusterfs - -import ( - "context" - "crypto/tls" - "fmt" - "math" - "math/rand" - "net/http" - "os" - "path/filepath" - "strconv" - dstrings "strings" - "sync" - - gcli "github.com/heketi/heketi/client/api/go-client" - gapi "github.com/heketi/heketi/pkg/glusterfs/api" - "k8s.io/klog/v2" - "k8s.io/mount-utils" - netutils "k8s.io/utils/net" - utilstrings "k8s.io/utils/strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/uuid" - clientset "k8s.io/client-go/kubernetes" - volumehelpers "k8s.io/cloud-provider/volume/helpers" - storagehelpers "k8s.io/component-helpers/storage/volume" - proxyutil "k8s.io/kubernetes/pkg/proxy/util" - "k8s.io/kubernetes/pkg/volume" - volutil "k8s.io/kubernetes/pkg/volume/util" -) - -// ProbeVolumePlugins is the primary entrypoint for volume plugins. -func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&glusterfsPlugin{host: nil, gidTable: make(map[string]*MinMaxAllocator)}} -} - -type glusterfsPlugin struct { - host volume.VolumeHost - gidTable map[string]*MinMaxAllocator - gidTableLock sync.Mutex -} - -var _ volume.VolumePlugin = &glusterfsPlugin{} -var _ volume.PersistentVolumePlugin = &glusterfsPlugin{} -var _ volume.DeletableVolumePlugin = &glusterfsPlugin{} -var _ volume.ProvisionableVolumePlugin = &glusterfsPlugin{} -var _ volume.ExpandableVolumePlugin = &glusterfsPlugin{} -var _ volume.Provisioner = &glusterfsVolumeProvisioner{} -var _ volume.Deleter = &glusterfsVolumeDeleter{} - -const ( - glusterfsPluginName = "kubernetes.io/glusterfs" - volPrefix = "vol_" - dynamicEpSvcPrefix = "glusterfs-dynamic" - replicaCount = 3 - secretKeyName = "key" // key name used in secret - defaultGidMin = 2000 - defaultGidMax = math.MaxInt32 - - // maxCustomEpNamePrefix is the maximum number of chars. - // which can be used as ep/svc name prefix. This number is carved - // out from below formula. - // max length of name of an ep - length of pvc uuid - // where max length of name of an ep is 63 and length of uuid is 37 - maxCustomEpNamePrefixLen = 26 - - // absoluteGidMin/Max are currently the same as the - // default values, but they play a different role and - // could take a different value. 
Only thing we need is: - // absGidMin <= defGidMin <= defGidMax <= absGidMax - absoluteGidMin = 2000 - absoluteGidMax = math.MaxInt32 - heketiAnn = "heketi-dynamic-provisioner" - glusterTypeAnn = "gluster.org/type" - glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV" - heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id" - - // Error string returned by heketi - errIDNotFound = "Id not found" -) - -func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error { - plugin.host = host - return nil -} - -func (plugin *glusterfsPlugin) GetPluginName() string { - return glusterfsPluginName -} - -func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - return "", fmt.Errorf("GetVolumeName() is unimplemented for GlusterFS") -} - -func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool { - return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs != nil) || - (spec.Volume != nil && spec.Volume.Glusterfs != nil) -} - -func (plugin *glusterfsPlugin) RequiresRemount(spec *volume.Spec) bool { - return false -} - -func (plugin *glusterfsPlugin) SupportsMountOption() bool { - return true -} - -func (plugin *glusterfsPlugin) SupportsBulkVolumeVerification() bool { - return false -} - -func (plugin *glusterfsPlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) { - return false, nil -} - -func (plugin *glusterfsPlugin) RequiresFSResize() bool { - return false -} - -func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { - return []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - v1.ReadOnlyMany, - v1.ReadWriteMany, - } -} - -func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - epName, epNamespace, err := plugin.getEndpointNameAndNamespace(spec, pod.Namespace) - if err != nil { - return nil, err - } - kubeClient := plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("failed to get kube client to initialize mounter") - } - ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(context.TODO(), epName, metav1.GetOptions{}) - if err != nil { - klog.Errorf("failed to get endpoint %s: %v", epName, err) - return nil, err - } - klog.V(4).Infof("glusterfs pv endpoint %v", ep) - return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName())) -} - -func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) { - if spec.Volume != nil && spec.Volume.Glusterfs != nil { - endpoints := spec.Volume.Glusterfs.EndpointsName - if endpoints == "" { - return "", "", fmt.Errorf("no glusterFS endpoint specified") - } - return endpoints, defaultNamespace, nil - } else if spec.PersistentVolume != nil && - spec.PersistentVolume.Spec.Glusterfs != nil { - endpoints := spec.PersistentVolume.Spec.Glusterfs.EndpointsName - endpointsNs := defaultNamespace - overriddenNs := spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace - if overriddenNs != nil { - if len(*overriddenNs) > 0 { - endpointsNs = *overriddenNs - } else { - return "", "", fmt.Errorf("endpointnamespace field set, but no endpointnamespace specified") - } - } - return endpoints, endpointsNs, nil - } - return "", "", fmt.Errorf("spec does not reference a GlusterFS volume type") - -} -func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) { - volPath, readOnly, err 
:= getVolumeInfo(spec) - if err != nil { - klog.Errorf("failed to get volumesource: %v", err) - return nil, err - } - return &glusterfsMounter{ - glusterfs: &glusterfs{ - volName: spec.Name(), - mounter: mounter, - pod: pod, - plugin: plugin, - MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, utilstrings.EscapeQualifiedName(glusterfsPluginName), spec.Name())), - }, - hosts: ep, - path: volPath, - readOnly: readOnly, - mountOptions: volutil.MountOptionFromSpec(spec), - }, nil -} - -func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName())) -} - -func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) { - return &glusterfsUnmounter{&glusterfs{ - volName: volName, - mounter: mounter, - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}}, - plugin: plugin, - MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, utilstrings.EscapeQualifiedName(glusterfsPluginName), volName)), - }}, nil -} - -func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - - // To reconstruct volume spec we need endpoint where fetching endpoint from mount - // string looks to be impossible, so returning error. - return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath") -} - -// Glusterfs volumes represent a bare host file or directory mount of an Glusterfs export. -type glusterfs struct { - volName string - pod *v1.Pod - mounter mount.Interface - plugin *glusterfsPlugin - volume.MetricsProvider -} - -type glusterfsMounter struct { - *glusterfs - hosts *v1.Endpoints - path string - readOnly bool - mountOptions []string -} - -var _ volume.Mounter = &glusterfsMounter{} - -func (b *glusterfsMounter) GetAttributes() volume.Attributes { - return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: false, - SELinuxRelabel: false, - } -} - -// SetUp attaches the disk and bind mounts to the volume path. -func (b *glusterfsMounter) SetUp(mounterArgs volume.MounterArgs) error { - return b.SetUpAt(b.GetPath(), mounterArgs) -} - -func (b *glusterfsMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error { - notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - klog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err) - if err != nil && !os.IsNotExist(err) { - return err - } - if !notMnt { - return nil - } - if err := os.MkdirAll(dir, 0750); err != nil { - return err - } - err = b.setUpAtInternal(dir) - if err == nil { - return nil - } - - // Cleanup upon failure. 
- mount.CleanupMountPoint(dir, b.mounter, false) - return err -} - -func (glusterfsVolume *glusterfs) GetPath() string { - name := glusterfsPluginName - return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, utilstrings.EscapeQualifiedName(name), glusterfsVolume.volName) -} - -type glusterfsUnmounter struct { - *glusterfs -} - -var _ volume.Unmounter = &glusterfsUnmounter{} - -func (c *glusterfsUnmounter) TearDown() error { - return c.TearDownAt(c.GetPath()) -} - -func (c *glusterfsUnmounter) TearDownAt(dir string) error { - return mount.CleanupMountPoint(dir, c.mounter, false) -} - -func (b *glusterfsMounter) setUpAtInternal(dir string) error { - var errs error - options := []string{} - hasLogFile := false - hasLogLevel := false - log := "" - if b.readOnly { - options = append(options, "ro") - } - - // Check for log-file,log-level options existence in user supplied mount options, if provided, use those. - for _, userOpt := range b.mountOptions { - switch { - case dstrings.HasPrefix(userOpt, "log-file"): - klog.V(4).Infof("log-file mount option has provided") - hasLogFile = true - - case dstrings.HasPrefix(userOpt, "log-level"): - klog.V(4).Infof("log-level mount option has provided") - hasLogLevel = true - } - } - - // If logfile has not been provided, create driver specific log file. - if !hasLogFile { - p := filepath.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName) - if err := os.MkdirAll(p, 0750); err != nil { - return fmt.Errorf("failed to create directory %v: %v", p, err) - } - - // adding log-level ERROR to remove noise - // and more specific log path so each pod has - // its own log based on PV + Pod - log = filepath.Join(p, b.pod.Name+"-glusterfs.log") - - // Use derived log file in gluster fuse mount - options = append(options, "log-file="+log) - } - if !hasLogLevel { - options = append(options, "log-level=ERROR") - } - var addrlist []string - if b.hosts == nil { - return fmt.Errorf("glusterfs endpoint is nil in mounter") - } - addr := sets.String{} - if b.hosts.Subsets != nil { - for _, s := range b.hosts.Subsets { - for _, a := range s.Addresses { - if !addr.Has(a.IP) { - addr.Insert(a.IP) - addrlist = append(addrlist, a.IP) - } - } - } - } - - if (len(addrlist) > 0) && (addrlist[0] != "") { - ip := addrlist[rand.Intn(len(addrlist))] - - // Add backup-volfile-servers and auto_unmount options. - // When ip is also in backup-volfile-servers, there will be a warning: - // "gf_remember_backup_volfile_server] 0-glusterfs: failed to set volfile server: File exists". - addr.Delete(ip) - backups := addr.List() - // Avoid an invalid empty backup-volfile-servers option. - if len(backups) > 0 { - options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":")) - } - options = append(options, "auto_unmount") - - mountOptions := volutil.JoinMountOptions(b.mountOptions, options) - // with `backup-volfile-servers` mount option in place, it is not required to - // iterate over all the servers in the addrlist. A mount attempt with this option - // will fetch all the servers mentioned in the backup-volfile-servers list. 
- // Refer to backup-volfile-servers @ http://docs.gluster.org/en/latest/Administrator%20Guide/Setting%20Up%20Clients/ - - errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions) - if errs == nil { - klog.Infof("successfully mounted directory %s", dir) - return nil - } - if dstrings.Contains(errs.Error(), "Invalid option auto_unmount") || - dstrings.Contains(errs.Error(), "Invalid argument") { - // Give a try without `auto_unmount` mount option, because - // it could be that gluster fuse client is older version and - // mount.glusterfs is unaware of `auto_unmount`. - noAutoMountOptions := make([]string, 0, len(mountOptions)) - for _, opt := range mountOptions { - if opt != "auto_unmount" { - noAutoMountOptions = append(noAutoMountOptions, opt) - } - } - errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions) - if errs == nil { - klog.Infof("successfully mounted %s", dir) - return nil - } - } - } else { - return fmt.Errorf("failed to execute mount command:[no valid ipaddress found in endpoint address list]") - } - - // Failed mount scenario. - // Since glusterfs does not return error text - // it all goes in a log file, we will read the log file - logErr := readGlusterLog(log, b.pod.Name) - if logErr != nil { - return fmt.Errorf("mount failed: %v, the following error information was pulled from the glusterfs log to help diagnose this issue: %v", errs, logErr) - } - return fmt.Errorf("mount failed: %v", errs) - -} - -// getVolumeInfo returns 'path' and 'readonly' field values from the provided glusterfs spec. -func getVolumeInfo(spec *volume.Spec) (string, bool, error) { - if spec.Volume != nil && spec.Volume.Glusterfs != nil { - return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil - } else if spec.PersistentVolume != nil && - spec.PersistentVolume.Spec.Glusterfs != nil { - return spec.PersistentVolume.Spec.Glusterfs.Path, spec.ReadOnly, nil - } - return "", false, fmt.Errorf("spec does not reference a Glusterfs volume type") -} - -func (plugin *glusterfsPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - return plugin.newProvisionerInternal(options) -} - -func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptions) (volume.Provisioner, error) { - return &glusterfsVolumeProvisioner{ - glusterfsMounter: &glusterfsMounter{ - glusterfs: &glusterfs{ - plugin: plugin, - }, - }, - options: options, - }, nil -} - -type provisionerConfig struct { - url string - user string - userKey string - secretNamespace string - secretName string - secretValue string `datapolicy:"token"` - clusterID string - gidMin int - gidMax int - volumeType gapi.VolumeDurabilityInfo - volumeOptions []string - volumeNamePrefix string - thinPoolSnapFactor float32 - customEpNamePrefix string -} - -type glusterfsVolumeProvisioner struct { - *glusterfsMounter - provisionerConfig - options volume.VolumeOptions -} - -func convertGid(gidString string) (int, error) { - gid64, err := strconv.ParseInt(gidString, 10, 32) - if err != nil { - return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err) - } - if gid64 < 0 { - return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString) - } - - // ParseInt returns a int64, but since we parsed only - // for 32 bit, we can cast to int without loss: - gid := int(gid64) - return gid, nil -} - -func convertVolumeParam(volumeString string) (int, error) { - count, err := strconv.Atoi(volumeString) - if err != nil { - return 0, fmt.Errorf("failed to parse volumestring %q: %v", 
volumeString, err) - } - - if count < 0 { - return 0, fmt.Errorf("negative values are not allowed") - } - return count, nil -} - -func (plugin *glusterfsPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - return plugin.newDeleterInternal(spec) -} - -func (plugin *glusterfsPlugin) newDeleterInternal(spec *volume.Spec) (volume.Deleter, error) { - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs == nil { - return nil, fmt.Errorf("spec.PersistentVolume.Spec.Glusterfs is nil") - } - return &glusterfsVolumeDeleter{ - glusterfsMounter: &glusterfsMounter{ - glusterfs: &glusterfs{ - volName: spec.Name(), - plugin: plugin, - }, - path: spec.PersistentVolume.Spec.Glusterfs.Path, - }, - spec: spec.PersistentVolume, - }, nil -} - -type glusterfsVolumeDeleter struct { - *glusterfsMounter - provisionerConfig - spec *v1.PersistentVolume -} - -func (d *glusterfsVolumeDeleter) GetPath() string { - name := glusterfsPluginName - return d.plugin.host.GetPodVolumeDir(d.glusterfsMounter.glusterfs.pod.UID, utilstrings.EscapeQualifiedName(name), d.glusterfsMounter.glusterfs.volName) -} - -// Traverse the PVs, fetching all the GIDs from those -// in a given storage class, and mark them in the table. -func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error { - kubeClient := plugin.host.GetKubeClient() - if kubeClient == nil { - return fmt.Errorf("failed to get kube client when collecting gids") - } - pvList, err := kubeClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) - if err != nil { - return fmt.Errorf("failed to get existing persistent volumes") - } - for _, pv := range pvList.Items { - if storagehelpers.GetPersistentVolumeClass(&pv) != className { - continue - } - pvName := pv.ObjectMeta.Name - gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey] - if !ok { - klog.Warningf("no GID found in pv %v", pvName) - continue - } - gid, err := convertGid(gidStr) - if err != nil { - klog.Errorf("failed to parse gid %s: %v", gidStr, err) - continue - } - _, err = gidTable.Allocate(gid) - if err == ErrConflict { - klog.Warningf("GID %v found in pv %v was already allocated", gid, pvName) - } else if err != nil { - return fmt.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err) - } - } - return nil -} - -// Return the gid table for a storage class. -// - If this is the first time, fill it with all the gids -// used in PVs of this storage class by traversing the PVs. -// - Adapt the range of the table to the current range of the SC. 
-func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) { - plugin.gidTableLock.Lock() - gidTable, ok := plugin.gidTable[className] - plugin.gidTableLock.Unlock() - - if ok { - err := gidTable.SetRange(min, max) - if err != nil { - return nil, err - } - return gidTable, nil - } - - // create a new table and fill it - newGidTable, err := NewMinMaxAllocator(0, absoluteGidMax) - if err != nil { - return nil, err - } - - // collect gids with the full range - err = plugin.collectGids(className, newGidTable) - if err != nil { - return nil, err - } - - // and only reduce the range afterwards - err = newGidTable.SetRange(min, max) - if err != nil { - return nil, err - } - - // if in the meantime a table appeared, use it - plugin.gidTableLock.Lock() - defer plugin.gidTableLock.Unlock() - gidTable, ok = plugin.gidTable[className] - if ok { - err = gidTable.SetRange(min, max) - if err != nil { - return nil, err - } - return gidTable, nil - } - - plugin.gidTable[className] = newGidTable - return newGidTable, nil -} - -func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { - gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey] - if !ok { - return 0, false, nil - } - gid, err := convertGid(gidStr) - return gid, true, err -} - -func (d *glusterfsVolumeDeleter) Delete() error { - klog.V(2).Infof("delete volume %s", d.glusterfsMounter.path) - volumeName := d.glusterfsMounter.path - volumeID, err := getVolumeID(d.spec, volumeName) - if err != nil { - return fmt.Errorf("failed to get volumeID: %v", err) - } - class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec) - if err != nil { - return err - } - cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient()) - if err != nil { - return err - } - d.provisionerConfig = *cfg - klog.V(4).Infof("deleting volume %q", volumeID) - gid, exists, err := d.getGid() - if err != nil { - klog.Error(err) - } else if exists { - gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax) - if err != nil { - return fmt.Errorf("failed to get gidTable: %v", err) - } - err = gidTable.Release(gid) - if err != nil { - return fmt.Errorf("failed to release gid %v: %v", gid, err) - } - } - cli := filterClient(gcli.NewClient(d.url, d.user, d.secretValue), d.plugin.host.GetFilteredDialOptions()) - if cli == nil { - klog.Errorf("failed to create glusterfs REST client") - return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") - } - err = cli.VolumeDelete(volumeID) - if err != nil { - if dstrings.TrimSpace(err.Error()) != errIDNotFound { - // don't log error details from client calls in events - klog.V(4).Infof("failed to delete volume %s: %v", volumeName, err) - return fmt.Errorf("failed to delete volume: see kube-controller-manager.log for details") - } - klog.V(2).Infof("volume %s not present in heketi, ignoring", volumeName) - } - klog.V(2).Infof("volume %s deleted successfully", volumeName) - - //Deleter takes endpoint and namespace from pv spec. 
- pvSpec := d.spec.Spec - var dynamicEndpoint, dynamicNamespace string - if pvSpec.ClaimRef == nil { - klog.Errorf("ClaimRef is nil") - return fmt.Errorf("ClaimRef is nil") - } - if pvSpec.ClaimRef.Namespace == "" { - klog.Errorf("namespace is nil") - return fmt.Errorf("namespace is nil") - } - dynamicNamespace = pvSpec.ClaimRef.Namespace - if pvSpec.Glusterfs.EndpointsName != "" { - dynamicEndpoint = pvSpec.Glusterfs.EndpointsName - } - klog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint) - err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint) - if err != nil { - klog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err) - } else { - klog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint) - } - return nil -} - -func filterClient(client *gcli.Client, opts *proxyutil.FilteredDialOptions) *gcli.Client { - if opts == nil { - return client - } - dialer := proxyutil.NewFilteredDialContext(nil, nil, opts) - client.SetClientFunc(func(tlsConfig *tls.Config, checkRedirect gcli.CheckRedirectFunc) (gcli.HttpPerformer, error) { - transport := http.DefaultTransport.(*http.Transport).Clone() - transport.DialContext = dialer - transport.TLSClientConfig = tlsConfig - return &http.Client{Transport: transport, CheckRedirect: checkRedirect}, nil - }) - return client -} - -func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) { - if !volutil.ContainsAllAccessModes(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { - return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) - } - if p.options.PVC.Spec.Selector != nil { - klog.V(4).Infof("not able to parse your claim Selector") - return nil, fmt.Errorf("not able to parse your claim Selector") - } - if volutil.CheckPersistentVolumeClaimModeBlock(p.options.PVC) { - return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName()) - } - klog.V(4).Infof("provision volume with options %v", p.options) - scName := storagehelpers.GetPersistentVolumeClaimClass(p.options.PVC) - cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient()) - if err != nil { - return nil, err - } - p.provisionerConfig = *cfg - - gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax) - if err != nil { - return nil, fmt.Errorf("failed to get gidTable: %v", err) - } - gid, _, err := gidTable.AllocateNext() - if err != nil { - klog.Errorf("failed to reserve GID from table: %v", err) - return nil, fmt.Errorf("failed to reserve GID from table: %v", err) - } - klog.V(2).Infof("allocated GID %d for PVC %s", gid, p.options.PVC.Name) - glusterfs, sizeGiB, volID, err := p.CreateVolume(gid) - if err != nil { - if releaseErr := gidTable.Release(gid); releaseErr != nil { - klog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr) - } - return nil, fmt.Errorf("failed to create volume: %v", err) - } - mode := v1.PersistentVolumeFilesystem - pv := new(v1.PersistentVolume) - pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs - pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy - pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes - pv.Spec.VolumeMode = &mode - if len(pv.Spec.AccessModes) == 0 { - pv.Spec.AccessModes = p.plugin.GetAccessModes() - } - pv.Spec.MountOptions = 
p.options.MountOptions - gidStr := strconv.FormatInt(int64(gid), 10) - pv.Annotations = map[string]string{ - volutil.VolumeGidAnnotationKey: gidStr, - volutil.VolumeDynamicallyCreatedByKey: heketiAnn, - glusterTypeAnn: "file", - "Description": glusterDescAnn, - heketiVolIDAnn: volID, - } - pv.Spec.Capacity = v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)), - } - return pv, nil -} - -func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersistentVolumeSource, size int, volID string, err error) { - var clusterIDs []string - customVolumeName := "" - epServiceName := "" - kubeClient := p.plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, 0, "", fmt.Errorf("failed to get kube client to update endpoint") - } - if len(p.provisionerConfig.customEpNamePrefix) == 0 { - epServiceName = string(p.options.PVC.UID) - } else { - epServiceName = p.provisionerConfig.customEpNamePrefix + "-" + string(p.options.PVC.UID) - } - epNamespace := p.options.PVC.Namespace - endpoint, service, err := p.createOrGetEndpointService(epNamespace, epServiceName, p.options.PVC) - if err != nil { - klog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) - return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) - } - klog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service) - capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - - // GlusterFS/heketi creates volumes in units of GiB. - sz, err := volumehelpers.RoundUpToGiBInt(capacity) - if err != nil { - return nil, 0, "", err - } - klog.V(2).Infof("create volume of size %dGiB", sz) - if p.url == "" { - return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") - } - cli := filterClient(gcli.NewClient(p.url, p.user, p.secretValue), p.plugin.host.GetFilteredDialOptions()) - if cli == nil { - return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") - } - if p.provisionerConfig.clusterID != "" { - clusterIDs = dstrings.Split(p.clusterID, ",") - klog.V(4).Infof("provided clusterIDs %v", clusterIDs) - } - - if p.provisionerConfig.volumeNamePrefix != "" { - customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID()) - } - gid64 := int64(gid) - snaps := struct { - Enable bool `json:"enable"` - Factor float32 `json:"factor"` - }{ - true, - p.provisionerConfig.thinPoolSnapFactor, - } - volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps} - volume, err := cli.VolumeCreate(volumeReq) - if err != nil { - // don't log error details from client calls in events - klog.V(4).Infof("failed to create volume: %v", err) - return nil, 0, "", fmt.Errorf("failed to create volume: see kube-controller-manager.log for details") - } - klog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name) - volID = volume.Id - dynamicHostIps, err := getClusterNodes(cli, volume.Cluster) - if err != nil { - return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) - } - addrlist := make([]v1.EndpointAddress, len(dynamicHostIps)) - for i, v := range dynamicHostIps { - addrlist[i].IP = v - } - subset := make([]v1.EndpointSubset, 1) - 
ports := []v1.EndpointPort{{Port: 1, Protocol: "TCP"}} - endpoint.Subsets = subset - endpoint.Subsets[0].Addresses = addrlist - endpoint.Subsets[0].Ports = ports - _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(context.TODO(), endpoint, metav1.UpdateOptions{}) - if err != nil { - deleteErr := cli.VolumeDelete(volume.Id) - if deleteErr != nil { - // don't log error details from client calls in events - klog.V(4).Infof("failed to delete volume: %v, manual deletion of the volume required", deleteErr) - } - klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint) - err = kubeClient.CoreV1().Services(epNamespace).Delete(context.TODO(), epServiceName, metav1.DeleteOptions{}) - if err != nil && errors.IsNotFound(err) { - klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace) - err = nil - } - if err != nil { - klog.Errorf("failed to delete service %s/%s: %v", epNamespace, epServiceName, err) - } - klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", epNamespace, epServiceName) - return nil, 0, "", fmt.Errorf("failed to update endpoint %s: %v", endpoint, err) - } - klog.V(3).Infof("endpoint %s updated successfully", endpoint) - return &v1.GlusterfsPersistentVolumeSource{ - EndpointsName: endpoint.Name, - EndpointsNamespace: &epNamespace, - Path: volume.Name, - ReadOnly: false, - }, sz, volID, nil -} - -// createOrGetEndpointService() makes sure an endpoint and service -// exist for the given namespace, PVC name, endpoint name -// I.e. the endpoint or service is only created -// if it does not exist yet. -func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string, epServiceName string, pvc *v1.PersistentVolumeClaim) (endpoint *v1.Endpoints, service *v1.Service, err error) { - pvcNameOrID := "" - if len(pvc.Name) >= 63 { - pvcNameOrID = string(pvc.UID) - } else { - pvcNameOrID = pvc.Name - } - endpoint = &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: epServiceName, - Labels: map[string]string{ - "gluster.kubernetes.io/provisioned-for-pvc": pvcNameOrID, - }, - }, - } - kubeClient := p.plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service") - } - _, err = kubeClient.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoint, metav1.CreateOptions{}) - if err != nil && errors.IsAlreadyExists(err) { - klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) - err = nil - } - if err != nil { - klog.Errorf("failed to create endpoint: %v", err) - return nil, nil, fmt.Errorf("failed to create endpoint: %v", err) - } - service = &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: epServiceName, - Namespace: namespace, - Labels: map[string]string{ - "gluster.kubernetes.io/provisioned-for-pvc": pvcNameOrID, - }, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{ - {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil && errors.IsAlreadyExists(err) { - klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) - err = nil - } - if err != nil { - klog.Errorf("failed to create service: %v", err) - return nil, nil, fmt.Errorf("error creating service: %v", err) - } - return endpoint, service, nil -} - -func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServiceName string) (err error) { - kubeClient := d.plugin.host.GetKubeClient() - 
if kubeClient == nil { - return fmt.Errorf("failed to get kube client when deleting endpoint service") - } - err = kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), epServiceName, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) - } - klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName) - return nil -} - -// parseSecret finds a given Secret instance and reads user password from it. -func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { - secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient) - if err != nil { - klog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err) - return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err) - } - if len(secretMap) == 0 { - return "", fmt.Errorf("empty secret map") - } - secret := "" - for k, v := range secretMap { - if k == secretKeyName { - return v, nil - } - secret = v - } - - // If not found, the last secret in the map wins as done before - return secret, nil -} - -// getClusterNodes() returns the cluster nodes of a given cluster -func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) { - clusterinfo, err := cli.ClusterInfo(cluster) - if err != nil { - // don't log error details from client calls in events - klog.V(4).Infof("failed to get cluster details: %v", err) - return nil, fmt.Errorf("failed to get cluster details: see kube-controller-manager.log for details") - } - - // For the dynamically provisioned volume, we gather the list of node IPs - // of the cluster on which provisioned volume belongs to, as there can be multiple - // clusters. - for _, node := range clusterinfo.Nodes { - nodeInfo, err := cli.NodeInfo(string(node)) - if err != nil { - // don't log error details from client calls in events - klog.V(4).Infof("failed to get host ipaddress: %v", err) - return nil, fmt.Errorf("failed to get host ipaddress: see kube-controller-manager.log for details") - } - ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "") - // IP validates if a string is a valid IP address. - ip := netutils.ParseIPSloppy(ipaddr) - if ip == nil { - return nil, fmt.Errorf("glusterfs server node ip address %s must be a valid IP address, (e.g. 10.9.8.7)", ipaddr) - } - dynamicHostIps = append(dynamicHostIps, ipaddr) - } - klog.V(3).Infof("host list :%v", dynamicHostIps) - if len(dynamicHostIps) == 0 { - return nil, fmt.Errorf("no hosts found: %v", err) - } - return dynamicHostIps, nil -} - -// parseClassParameters parses StorageClass parameters. 
-func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) { - var cfg provisionerConfig - var err error - cfg.gidMin = defaultGidMin - cfg.gidMax = defaultGidMax - cfg.customEpNamePrefix = dynamicEpSvcPrefix - - authEnabled := true - parseVolumeType := "" - parseVolumeOptions := "" - parseVolumeNamePrefix := "" - parseThinPoolSnapFactor := "" - - //thin pool snap factor default to 1.0 - cfg.thinPoolSnapFactor = float32(1.0) - - for k, v := range params { - switch dstrings.ToLower(k) { - case "resturl": - cfg.url = v - case "restuser": - cfg.user = v - case "restuserkey": - cfg.userKey = v - case "secretname": - cfg.secretName = v - case "secretnamespace": - cfg.secretNamespace = v - case "clusterid": - if len(v) != 0 { - cfg.clusterID = v - } - case "restauthenabled": - authEnabled = dstrings.ToLower(v) == "true" - case "gidmin": - parseGidMin, err := convertGid(v) - if err != nil { - return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName) - } - if parseGidMin < absoluteGidMin { - return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin) - } - if parseGidMin > absoluteGidMax { - return nil, fmt.Errorf("gidMin must be <= %v", absoluteGidMax) - } - cfg.gidMin = parseGidMin - case "gidmax": - parseGidMax, err := convertGid(v) - if err != nil { - return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName) - } - if parseGidMax < absoluteGidMin { - return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin) - } - if parseGidMax > absoluteGidMax { - return nil, fmt.Errorf("gidMax must be <= %v", absoluteGidMax) - } - cfg.gidMax = parseGidMax - case "volumetype": - parseVolumeType = v - - case "volumeoptions": - if len(v) != 0 { - parseVolumeOptions = v - } - case "volumenameprefix": - if len(v) != 0 { - parseVolumeNamePrefix = v - } - case "snapfactor": - if len(v) != 0 { - parseThinPoolSnapFactor = v - } - case "customepnameprefix": - // If the string has > 'maxCustomEpNamePrefixLen' chars, the final endpoint name will - // exceed the limitation of 63 chars, so fail if prefix is > 'maxCustomEpNamePrefixLen' - // characters. This is only applicable for 'customepnameprefix' string and default ep name - // string will always pass. 
- if len(v) <= maxCustomEpNamePrefixLen { - cfg.customEpNamePrefix = v - } else { - return nil, fmt.Errorf("'customepnameprefix' value should be < %d characters", maxCustomEpNamePrefixLen) - } - default: - return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName) - } - } - if len(cfg.url) == 0 { - return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", glusterfsPluginName) - } - if len(parseVolumeType) == 0 { - cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}} - } else { - parseVolumeTypeInfo := dstrings.Split(parseVolumeType, ":") - switch parseVolumeTypeInfo[0] { - case "replicate": - if len(parseVolumeTypeInfo) >= 2 { - newReplicaCount, err := convertVolumeParam(parseVolumeTypeInfo[1]) - if err != nil { - return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err) - } - cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}} - } else { - cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}} - } - case "disperse": - if len(parseVolumeTypeInfo) >= 3 { - newDisperseData, err := convertVolumeParam(parseVolumeTypeInfo[1]) - if err != nil { - return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err) - } - newDisperseRedundancy, err := convertVolumeParam(parseVolumeTypeInfo[2]) - if err != nil { - return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[2], err) - } - cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}} - } else { - return nil, fmt.Errorf("StorageClass for provisioner %q must have data:redundancy count set for disperse volumes in storage class option '%s'", glusterfsPluginName, "volumetype") - } - case "none": - cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityDistributeOnly} - default: - return nil, fmt.Errorf("error parsing value for option 'volumetype' for volume plugin %s", glusterfsPluginName) - } - } - if !authEnabled { - cfg.user = "" - cfg.secretName = "" - cfg.secretNamespace = "" - cfg.userKey = "" - cfg.secretValue = "" - } - - if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 { - // secretName + Namespace has precedence over userKey - if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 { - cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient) - if err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("StorageClass for provisioner %q must have secretNamespace and secretName either both set or both empty", glusterfsPluginName) - } - } else { - cfg.secretValue = cfg.userKey - } - if cfg.gidMin > cfg.gidMax { - return nil, fmt.Errorf("storageClass for provisioner %q must have gidMax value >= gidMin", glusterfsPluginName) - } - if len(parseVolumeOptions) != 0 { - volOptions := dstrings.Split(parseVolumeOptions, ",") - if len(volOptions) == 0 { - return nil, fmt.Errorf("storageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName) - } - cfg.volumeOptions = volOptions - } - if len(parseVolumeNamePrefix) != 0 { - if dstrings.Contains(parseVolumeNamePrefix, "_") { - return nil, fmt.Errorf("storageclass parameter 'volumenameprefix' should not contain '_' in its 
value") - } - cfg.volumeNamePrefix = parseVolumeNamePrefix - } - if len(parseThinPoolSnapFactor) != 0 { - thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32) - if err != nil { - return nil, fmt.Errorf("failed to convert snapfactor %v to float: %v", parseThinPoolSnapFactor, err) - } - if thinPoolSnapFactor < 1.0 || thinPoolSnapFactor > 100.0 { - return nil, fmt.Errorf("invalid snapshot factor %v, the value must be between 1 to 100", thinPoolSnapFactor) - } - cfg.thinPoolSnapFactor = float32(thinPoolSnapFactor) - } - return &cfg, nil -} - -// getVolumeID returns volumeID from the PV or volumename. -func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) { - volumeID := "" - - // Get volID from pvspec if available, else fill it from volumename. - if pv != nil { - if pv.Annotations[heketiVolIDAnn] != "" { - volumeID = pv.Annotations[heketiVolIDAnn] - } else { - volumeID = dstrings.TrimPrefix(volumeName, volPrefix) - } - } else { - return volumeID, fmt.Errorf("provided PV spec is nil") - } - if volumeID == "" { - return volumeID, fmt.Errorf("volume ID is empty") - } - return volumeID, nil -} - -func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { - pvSpec := spec.PersistentVolume.Spec - volumeName := pvSpec.Glusterfs.Path - klog.V(2).Infof("received request to expand volume %s", volumeName) - volumeID, err := getVolumeID(spec.PersistentVolume, volumeName) - if err != nil { - return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err) - } - //Get details of StorageClass. - class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) - if err != nil { - return oldSize, err - } - cfg, err := parseClassParameters(class.Parameters, plugin.host.GetKubeClient()) - if err != nil { - return oldSize, err - } - klog.V(4).Infof("expanding volume: %q", volumeID) - - //Create REST server connection - cli := filterClient(gcli.NewClient(cfg.url, cfg.user, cfg.secretValue), plugin.host.GetFilteredDialOptions()) - if cli == nil { - klog.Errorf("failed to create glusterfs REST client") - return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") - } - - // Find out delta size - expansionSize := newSize - expansionSize.Sub(oldSize) - expansionSizeGiB, err := volumehelpers.RoundUpToGiBInt(expansionSize) - if err != nil { - return oldSize, err - } - - // Find out requested Size - requestGiB, err := volumehelpers.RoundUpToGiB(newSize) - if err != nil { - return oldSize, err - } - - //Check the existing volume size - currentVolumeInfo, err := cli.VolumeInfo(volumeID) - if err != nil { - // don't log error details from client calls in events - klog.V(4).Infof("error when fetching details of volume %s: %v", volumeName, err) - return oldSize, fmt.Errorf("failed to get volume info %s: see kube-controller-manager.log for details", volumeName) - } - if int64(currentVolumeInfo.Size) >= requestGiB { - return newSize, nil - } - - // Make volume expansion request - volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB} - - // Expand the volume - volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq) - if err != nil { - // don't log error details from client calls in events - klog.V(4).Infof("failed to expand volume %s: %v", volumeName, err) - return oldSize, fmt.Errorf("failed to expand volume: see kube-controller-manager.log for details") - } - 
klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) - newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size)) - return newVolumeSize, nil -} diff --git a/pkg/volume/glusterfs/glusterfs_minmax.go b/pkg/volume/glusterfs/glusterfs_minmax.go deleted file mode 100644 index f99bdc71486..00000000000 --- a/pkg/volume/glusterfs/glusterfs_minmax.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// This implementation is space-efficient for a sparse -// allocation over a big range. Could be optimized -// for high absolute allocation number with a bitmap. -// - -package glusterfs - -import ( - "errors" - "sync" -) - -var ( - //ErrConflict returned when value is already in use. - ErrConflict = errors.New("number already allocated") - - //ErrInvalidRange returned invalid range, for eg# min > max - ErrInvalidRange = errors.New("invalid range") - - //ErrOutOfRange returned when value is not in pool range. - ErrOutOfRange = errors.New("out of range") - - //ErrRangeFull returned when no more free values in the pool. - ErrRangeFull = errors.New("range full") - - //ErrInternal returned when no free item found, but a.free != 0. - ErrInternal = errors.New("internal error") -) - -// MinMaxAllocator defines allocator struct. -type MinMaxAllocator struct { - lock sync.Mutex - min int - max int - free int - used map[int]bool -} - -var _ Rangeable = &MinMaxAllocator{} - -// Rangeable is an Interface that can adjust its min/max range. -// Rangeable should be threadsafe -type Rangeable interface { - Allocate(int) (bool, error) - AllocateNext() (int, bool, error) - Release(int) error - Has(int) bool - Free() int - SetRange(min, max int) error -} - -// NewMinMaxAllocator return a new allocator or error based on provided min/max value. -func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) { - if min > max { - return nil, ErrInvalidRange - } - return &MinMaxAllocator{ - min: min, - max: max, - free: 1 + max - min, - used: map[int]bool{}, - }, nil -} - -// SetRange defines the range/pool with provided min and max values. -func (a *MinMaxAllocator) SetRange(min, max int) error { - if min > max { - return ErrInvalidRange - } - - a.lock.Lock() - defer a.lock.Unlock() - - // Check if we need to change - if a.min == min && a.max == max { - return nil - } - - a.min = min - a.max = max - - // Recompute how many free we have in the range - numUsed := 0 - for i := range a.used { - if a.inRange(i) { - numUsed++ - } - } - a.free = 1 + max - min - numUsed - - return nil -} - -// Allocate allocates provided value in the allocator and mark it as used. -func (a *MinMaxAllocator) Allocate(i int) (bool, error) { - a.lock.Lock() - defer a.lock.Unlock() - - if !a.inRange(i) { - return false, ErrOutOfRange - } - - if a.has(i) { - return false, ErrConflict - } - - a.used[i] = true - a.free-- - - return true, nil -} - -// AllocateNext allocates next value from the allocator. 
-func (a *MinMaxAllocator) AllocateNext() (int, bool, error) { - a.lock.Lock() - defer a.lock.Unlock() - - // Fast check if we're out of items - if a.free <= 0 { - return 0, false, ErrRangeFull - } - - // Scan from the minimum until we find a free item - for i := a.min; i <= a.max; i++ { - if !a.has(i) { - a.used[i] = true - a.free-- - return i, true, nil - } - } - - // no free item found, but a.free != 0 - return 0, false, ErrInternal -} - -// Release free/delete provided value from the allocator. -func (a *MinMaxAllocator) Release(i int) error { - a.lock.Lock() - defer a.lock.Unlock() - - if !a.has(i) { - return nil - } - - delete(a.used, i) - - if a.inRange(i) { - a.free++ - } - - return nil -} - -func (a *MinMaxAllocator) has(i int) bool { - _, ok := a.used[i] - return ok -} - -// Has check whether the provided value is used in the allocator -func (a *MinMaxAllocator) Has(i int) bool { - a.lock.Lock() - defer a.lock.Unlock() - - return a.has(i) -} - -// Free returns the number of free values in the allocator. -func (a *MinMaxAllocator) Free() int { - a.lock.Lock() - defer a.lock.Unlock() - return a.free -} - -func (a *MinMaxAllocator) inRange(i int) bool { - return a.min <= i && i <= a.max -} diff --git a/pkg/volume/glusterfs/glusterfs_minmax_test.go b/pkg/volume/glusterfs/glusterfs_minmax_test.go deleted file mode 100644 index 48989821af3..00000000000 --- a/pkg/volume/glusterfs/glusterfs_minmax_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package glusterfs - -import ( - "testing" -) - -func TestNewFree(t *testing.T) { - min := 1 - max := 10 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - if f := m.Free(); f != (max - min + 1) { - t.Errorf("expect to get %d free, but got %d", (max - min + 1), f) - } -} - -func TestNewInvalidRange(t *testing.T) { - if _, err := NewMinMaxAllocator(10, 1); err != ErrInvalidRange { - t.Errorf("expect to get Error '%v', got '%v'", ErrInvalidRange, err) - } -} - -func TestSetRange(t *testing.T) { - min := 1 - max := 10 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - if err = m.SetRange(10, 1); err != ErrInvalidRange { - t.Errorf("expected to get error '%v', got '%v'", ErrInvalidRange, err) - } - - if err = m.SetRange(1, 2); err != nil { - t.Errorf("error setting range: '%v'", err) - } - - if f := m.Free(); f != 2 { - t.Errorf("expect to get %d free, but got %d", 2, f) - } - - if ok, _ := m.Allocate(1); !ok { - t.Errorf("error allocate offset %v", 1) - } - - if f := m.Free(); f != 1 { - t.Errorf("expect to get 1 free, but got %d", f) - } - - if err = m.SetRange(1, 1); err != nil { - t.Errorf("error setting range: '%v'", err) - } - - if f := m.Free(); f != 0 { - t.Errorf("expect to get 0 free, but got %d", f) - } - - if err = m.SetRange(2, 2); err != nil { - t.Errorf("error setting range: '%v'", err) - } - - if f := m.Free(); f != 1 { - t.Errorf("expect to get 1 free, but got %d", f) - } -} - -func TestAllocateNext(t *testing.T) { - min := 1 - max := 10 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - el, ok, _ := m.AllocateNext() - if !ok { - t.Fatalf("unexpected error") - } - - if !m.Has(el) { - t.Errorf("expect element %v allocated", el) - } - - if f := m.Free(); f != (max-min+1)-1 { - t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f) - } -} - -func TestAllocateMax(t *testing.T) { - min := 1 - max := 10 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - for i := 1; i <= max; i++ { - if _, ok, _ := m.AllocateNext(); !ok { - t.Fatalf("unexpected error") - } - } - - if _, ok, _ := m.AllocateNext(); ok { - t.Errorf("unexpected success") - } - - if f := m.Free(); f != 0 { - t.Errorf("expect to get %d free, but got %d", 0, f) - } -} - -func TestAllocate(t *testing.T) { - min := 1 - max := 10 - offset := 3 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - if ok, err := m.Allocate(offset); !ok { - t.Errorf("error allocate offset %v: %v", offset, err) - } - - if !m.Has(offset) { - t.Errorf("expect element %v allocated", offset) - } - - if f := m.Free(); f != (max-min+1)-1 { - t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f) - } -} - -func TestAllocateConflict(t *testing.T) { - min := 1 - max := 10 - offset := 3 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - if ok, err := m.Allocate(offset); !ok { - t.Errorf("error allocate offset %v: %v", offset, err) - } - - ok, err := m.Allocate(offset) - if ok { - t.Errorf("unexpected success") - } - if err != ErrConflict { - t.Errorf("expected error '%v', got '%v'", ErrConflict, err) - } -} - -func TestAllocateOutOfRange(t *testing.T) { - min := 1 - max := 10 - offset := 11 - - m, err := 
NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - ok, err := m.Allocate(offset) - if ok { - t.Errorf("unexpected success") - } - if err != ErrOutOfRange { - t.Errorf("expected error '%v', got '%v'", ErrOutOfRange, err) - } -} - -func TestRelease(t *testing.T) { - min := 1 - max := 10 - offset := 3 - - m, err := NewMinMaxAllocator(min, max) - if err != nil { - t.Errorf("error creating new allocator: '%v'", err) - } - - if ok, err := m.Allocate(offset); !ok { - t.Errorf("error allocate offset %v: %v", offset, err) - } - - if !m.Has(offset) { - t.Errorf("expect offset %v allocated", offset) - } - - if err = m.Release(offset); err != nil { - t.Errorf("unexpected error: %v", err) - } - - if m.Has(offset) { - t.Errorf("expect offset %v not allocated", offset) - } -} diff --git a/pkg/volume/glusterfs/glusterfs_test.go b/pkg/volume/glusterfs/glusterfs_test.go deleted file mode 100644 index 4b440636077..00000000000 --- a/pkg/volume/glusterfs/glusterfs_test.go +++ /dev/null @@ -1,764 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package glusterfs - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - - gapi "github.com/heketi/heketi/pkg/glusterfs/api" - "k8s.io/mount-utils" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/fake" - core "k8s.io/client-go/testing" - utiltesting "k8s.io/client-go/util/testing" - "k8s.io/kubernetes/pkg/volume" - volumetest "k8s.io/kubernetes/pkg/volume/testing" -) - -func TestCanSupport(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("glusterfs_test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)) - plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs") - if err != nil { - t.Fatal("Can't find the plugin by name") - } - if plug.GetPluginName() != "kubernetes.io/glusterfs" { - t.Errorf("Wrong name: %s", plug.GetPluginName()) - } - if plug.CanSupport(&volume.Spec{}) { - t.Errorf("Expected false") - } - if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) { - t.Errorf("Expected false") - } - if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{}}}}) { - t.Errorf("Expected true") - } - if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) { - t.Errorf("Expected false") - } - if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) { - t.Errorf("Expected false") - } - if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: 
v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Glusterfs: &v1.GlusterfsPersistentVolumeSource{}}}}}) { - t.Errorf("Expected true") - } -} - -func TestGetAccessModes(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("glusterfs_test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)) - - plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs") - if err != nil { - t.Errorf("Can't find the plugin by name") - } - if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) { - t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany) - } -} - -func doTestPlugin(t *testing.T, spec *volume.Spec) { - tmpDir, err := utiltesting.MkTmpdir("glusterfs_test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)) - plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs") - if err != nil { - t.Errorf("Can't find the plugin by name") - } - ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}} - pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} - mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, mount.NewFakeMounter(nil)) - volumePath := mounter.GetPath() - if err != nil { - t.Errorf("Failed to make a new Mounter: %v", err) - } - if mounter == nil { - t.Error("Got a nil Mounter") - } - expectedPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~glusterfs/vol1") - if volumePath != expectedPath { - t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath) - } - if err := mounter.SetUp(volume.MounterArgs{}); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(volumePath); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", volumePath) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil)) - if err != nil { - t.Errorf("Failed to make a new Unmounter: %v", err) - } - if unmounter == nil { - t.Error("Got a nil Unmounter") - } - if err := unmounter.TearDown(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(volumePath); err == nil { - t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) - } else if !os.IsNotExist(err) { - t.Errorf("TearDown() failed: %v", err) - } -} - -func TestPluginVolume(t *testing.T) { - vol := &v1.Volume{ - Name: "vol1", - VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}}, - } - doTestPlugin(t, volume.NewSpecFromVolume(vol)) -} - -func TestPluginPersistentVolume(t *testing.T) { - vol := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vol1", - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: 
v1.PersistentVolumeSource{ - Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, - }, - }, - } - - doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) -} - -func TestPersistentClaimReadOnlyFlag(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("glusterfs_test") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvA", - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, - }, - ClaimRef: &v1.ObjectReference{ - Name: "claimA", - }, - }, - } - - claim := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "claimA", - Namespace: "nsA", - }, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "pvA", - }, - Status: v1.PersistentVolumeClaimStatus{ - Phase: v1.ClaimBound, - }, - } - - ep := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nsA", - Name: "ep", - }, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}}, - }}, - } - - client := fake.NewSimpleClientset(pv, claim, ep) - - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, client, nil)) - plug, _ := plugMgr.FindPluginByName(glusterfsPluginName) - - // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes - spec := volume.NewSpecFromPersistentVolume(pv, true) - pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}} - mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) - - if !mounter.GetAttributes().ReadOnly { - t.Errorf("Expected true for mounter.IsReadOnly") - } -} - -func TestParseClassParameters(t *testing.T) { - secret := v1.Secret{ - Type: "kubernetes.io/glusterfs", - Data: map[string][]byte{ - "data": []byte("mypassword"), - }, - } - tests := []struct { - name string - parameters map[string]string - secret *v1.Secret - expectError bool - expectConfig *provisionerConfig - }{ - { - "password", - map[string]string{ - "resturl": "https://localhost:8080", - "restuser": "admin", - "restuserkey": "password", - }, - nil, // secret - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - user: "admin", - userKey: "password", - secretValue: "password", - gidMin: 2000, - gidMax: 2147483647, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "secret", - map[string]string{ - "resturl": "https://localhost:8080", - "restuser": "admin", - "secretname": "mysecret", - "secretnamespace": "default", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - user: "admin", - secretName: "mysecret", - secretNamespace: "default", - secretValue: "mypassword", - gidMin: 2000, - gidMax: 2147483647, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "no 
authentication", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 2000, - gidMax: 2147483647, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "missing secret", - map[string]string{ - "resturl": "https://localhost:8080", - "secretname": "mysecret", - "secretnamespace": "default", - }, - nil, // secret - true, // expect error - nil, - }, - { - "secret with no namespace", - map[string]string{ - "resturl": "https://localhost:8080", - "secretname": "mysecret", - }, - &secret, - true, // expect error - nil, - }, - { - "missing url", - map[string]string{ - "restuser": "admin", - "restuserkey": "password", - }, - nil, // secret - true, // expect error - nil, - }, - { - "unknown parameter", - map[string]string{ - "unknown": "yes", - "resturl": "https://localhost:8080", - "restuser": "admin", - "restuserkey": "password", - }, - nil, // secret - true, // expect error - nil, - }, - { - "invalid gidMin #1", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "0", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMin #2", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "1999", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMin #3", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "1999", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMax #1", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMax": "0", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMax #2", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMax": "1999", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMax #3", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMax": "1999", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid gidMin:gidMax", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "5001", - "gidMax": "5000", - }, - &secret, - true, // expect error - nil, - }, - { - "valid gidMin", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 2147483647, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "valid gidMax", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMax": "5000", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 2000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: 
float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "valid gidMin:gidMax", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - - { - "valid volumetype: replicate", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "replicate:4", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - - { - "valid volumetype: disperse", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "disperse:4:2", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "valid snapfactor: 50", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "disperse:4:2", - "snapfactor": "50", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}}, - thinPoolSnapFactor: float32(50), - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - - { - "valid volumenameprefix: dept-dev", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "disperse:4:2", - "snapfactor": "50", - "volumenameprefix": "dept-dev", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}}, - thinPoolSnapFactor: float32(50), - volumeNamePrefix: "dept-dev", - customEpNamePrefix: "glusterfs-dynamic", - }, - }, - { - "invalid volumetype (disperse) parameter", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumetype": "disperse:4:asd", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid volumetype (replicate) parameter", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumetype": "replicate:asd", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid volumetype: unknown volumetype", - 
map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumetype": "dispersereplicate:4:2", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid volumetype : negative value", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumetype": "replicate:-1000", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid thinPoolSnapFactor: value out of range", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "snapfactor": "0.5", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid volumenameprefix: string starting with '_'", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumenameprefix": "_", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid volumenameprefix: string with '_'", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "volumenameprefix": "qe_dept", - }, - &secret, - true, // expect error - nil, - }, - { - "invalid thinPoolSnapFactor: value out of range", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "snapfactor": "120", - }, - &secret, - true, // expect error - nil, - }, - - { - "enable custom ep/svc name: customEpNamePrefix: myprefix", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "replicate:4", - "customEpNamePrefix": "myprefix", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "myprefix", - }, - }, - { - "empty custom ep/svc name: customEpNamePrefix:''", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "replicate:4", - "customEpNamePrefix": "", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "", - }, - }, - { - "custom ep/svc name with 26 chars: customEpNamePrefix:'charstringhastwentysixchar'", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "replicate:4", - "customEpNamePrefix": "charstringhastwentysixchar", - }, - &secret, - false, // expect error - &provisionerConfig{ - url: "https://localhost:8080", - gidMin: 4000, - gidMax: 5000, - volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}}, - thinPoolSnapFactor: float32(1.0), - customEpNamePrefix: "charstringhastwentysixchar", - }, - }, - { - "invalid customepnameprefix ( ie >26 chars) parameter", - map[string]string{ - "resturl": "https://localhost:8080", - "restauthenabled": "false", - "gidMin": "4000", - "gidMax": "5000", - "volumetype": "replicate:4", - "customEpNamePrefix": "myprefixhasmorethan26characters", - }, - &secret, - true, 
// expect error - nil, - }, - } - - for _, test := range tests { - - client := &fake.Clientset{} - client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if test.secret != nil { - return true, test.secret, nil - } - return true, nil, fmt.Errorf("test %s did not set a secret", test.name) - }) - - cfg, err := parseClassParameters(test.parameters, client) - - if err != nil && !test.expectError { - t.Errorf("Test %s got unexpected error %v", test.name, err) - } - if err == nil && test.expectError { - t.Errorf("test %s expected error and got none", test.name) - } - if test.expectConfig != nil { - if !reflect.DeepEqual(cfg, test.expectConfig) { - t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg) - } - } - } -} diff --git a/pkg/volume/glusterfs/glusterfs_util.go b/pkg/volume/glusterfs/glusterfs_util.go deleted file mode 100644 index 62c1ef36805..00000000000 --- a/pkg/volume/glusterfs/glusterfs_util.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package glusterfs - -import ( - "bufio" - "fmt" - "os" - - "k8s.io/klog/v2" -) - -// readGlusterLog will take the last 2 lines of the log file -// on failure of gluster SetUp and return those so kubelet can -// properly expose them -// return error on any failure -func readGlusterLog(path string, podName string) error { - - var line1 string - var line2 string - linecount := 0 - - klog.Infof("failure, now attempting to read the gluster log for pod %s", podName) - - // Check and make sure path exists - if len(path) == 0 { - return fmt.Errorf("log file does not exist for pod %s", podName) - } - - // open the log file - file, err := os.Open(path) - if err != nil { - return fmt.Errorf("could not open log file for pod %s", podName) - } - defer file.Close() - - // read in and scan the file using scanner - // from stdlib - fscan := bufio.NewScanner(file) - - // rather than guessing on bytes or using Seek - // going to scan entire file and take the last two lines - // generally the file should be small since it is pod specific - for fscan.Scan() { - if linecount > 0 { - line1 = line2 - } - line2 = "\n" + fscan.Text() - - linecount++ - } - - if linecount > 0 { - return fmt.Errorf("%v", line1+line2+"\n") - } - return nil -} diff --git a/test/e2e/common/storage/volumes.go b/test/e2e/common/storage/volumes.go index 1833be74e1a..ecb005e9663 100644 --- a/test/e2e/common/storage/volumes.go +++ b/test/e2e/common/storage/volumes.go @@ -43,18 +43,14 @@ limitations under the License. 
package storage import ( - "context" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" + admissionapi "k8s.io/pod-security-admission/api" ) // TODO(#99468): Check if these tests are still needed. @@ -123,39 +119,4 @@ var _ = SIGDescribe("Volumes", func() { e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) }) }) - - //////////////////////////////////////////////////////////////////////// - // Gluster - //////////////////////////////////////////////////////////////////////// - ginkgo.Describe("GlusterFS", func() { - ginkgo.It("should be mountable", func() { - // create gluster server and endpoints - config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name) - name := config.Prefix + "-server" - defer func() { - e2evolume.TestServerCleanup(f, config) - err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { - framework.ExpectNoError(err, "defer: Gluster delete endpoints failed") - } - }() - - tests := []e2evolume.Test{ - { - Volume: v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: name, - // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh - Path: "test_vol", - ReadOnly: true, - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/gluster/index.html - ExpectedContent: "Hello from GlusterFS!", - }, - } - e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) - }) - }) }) diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 7278de48f3a..15c1b654438 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -166,65 +166,6 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf return config, pod, host } -// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object. 
-func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) { - config = TestConfig{ - Namespace: namespace, - Prefix: "gluster", - ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer), - ServerPorts: []int{24007, 24008, 49152}, - } - pod, ip = CreateStorageServer(cs, config) - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-server", - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Protocol: v1.ProtocolTCP, - Port: 24007, - }, - }, - }, - } - - _, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) - framework.ExpectNoError(err, "failed to create service for Gluster server") - - ginkgo.By("creating Gluster endpoints") - endpoints := &v1.Endpoints{ - TypeMeta: metav1.TypeMeta{ - Kind: "Endpoints", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-server", - }, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{ - { - IP: ip, - }, - }, - Ports: []v1.EndpointPort{ - { - Name: "gluster", - Port: 24007, - Protocol: v1.ProtocolTCP, - }, - }, - }, - }, - } - _, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) - framework.ExpectNoError(err, "failed to create endpoints for Gluster server") - - return config, pod, ip -} - // CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer // and ip address string are returned. // Note: Expect() is called so no error is returned. diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index e8cb7a84951..a1f2f4afc0b 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -46,7 +46,6 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -219,143 +218,6 @@ func (v *nfsVolume) DeleteVolume() { cleanUpVolumeServer(v.f, v.serverPod) } -// Gluster -type glusterFSDriver struct { - driverInfo storageframework.DriverInfo -} - -type glusterVolume struct { - prefix string - serverPod *v1.Pod - f *framework.Framework -} - -var _ storageframework.TestDriver = &glusterFSDriver{} -var _ storageframework.PreprovisionedVolumeTestDriver = &glusterFSDriver{} -var _ storageframework.InlineVolumeTestDriver = &glusterFSDriver{} -var _ storageframework.PreprovisionedPVTestDriver = &glusterFSDriver{} - -// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface -func InitGlusterFSDriver() storageframework.TestDriver { - return &glusterFSDriver{ - driverInfo: storageframework.DriverInfo{ - Name: "gluster", - InTreePluginName: "kubernetes.io/glusterfs", - MaxFileSize: storageframework.FileSizeMedium, - SupportedSizeRange: e2evolume.SizeRange{ - Min: "1Gi", - }, - SupportedFsType: sets.NewString( - "", // Default fsType - ), - Capabilities: map[storageframework.Capability]bool{ - storageframework.CapPersistence: true, - storageframework.CapExec: true, - storageframework.CapRWX: true, - storageframework.CapMultiPODs: true, - }, - }, - } -} - -func (g *glusterFSDriver) GetDriverInfo() *storageframework.DriverInfo { - return &g.driverInfo -} - -func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { - 
e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") -} - -func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { - gv, ok := e2evolume.(*glusterVolume) - if !ok { - framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume) - } - - name := gv.prefix + "-server" - return &v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: name, - // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh - Path: "test_vol", - ReadOnly: readOnly, - }, - } -} - -func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gv, ok := e2evolume.(*glusterVolume) - if !ok { - framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume) - } - - name := gv.prefix + "-server" - return &v1.PersistentVolumeSource{ - Glusterfs: &v1.GlusterfsPersistentVolumeSource{ - EndpointsName: name, - // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh - Path: "test_vol", - ReadOnly: readOnly, - }, - }, nil -} - -func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { - return &storageframework.PerTestConfig{ - Driver: g, - Prefix: "gluster", - Framework: f, - }, func() {} -} - -func (g *glusterFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { - f := config.Framework - cs := f.ClientSet - ns := f.Namespace - - c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name) - config.ServerConfig = &c - return &glusterVolume{ - prefix: config.Prefix, - serverPod: serverPod, - f: f, - } -} - -func (v *glusterVolume) DeleteVolume() { - f := v.f - cs := f.ClientSet - ns := f.Namespace - - name := v.prefix + "-server" - - nameSpaceName := fmt.Sprintf("%s/%s", ns.Name, name) - - framework.Logf("Deleting Gluster endpoints %s...", nameSpaceName) - err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - framework.Failf("Gluster deleting endpoint %s failed: %v", nameSpaceName, err) - } - framework.Logf("Gluster endpoints %q not found, assuming deleted", nameSpaceName) - } - - framework.Logf("Deleting Gluster service %s...", nameSpaceName) - err = cs.CoreV1().Services(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - framework.Failf("Gluster deleting service %s failed: %v", nameSpaceName, err) - } - framework.Logf("Gluster service %q not found, assuming deleted", nameSpaceName) - } - - framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name) - err = e2epod.DeletePodWithWait(cs, v.serverPod) - if err != nil { - framework.Failf("Gluster server pod delete failed: %v", err) - } -} - // iSCSI // The iscsiadm utility and iscsi target kernel modules must be installed on all nodes. 
type iSCSIDriver struct { diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go index 1118f4114c8..d9334969ed4 100644 --- a/test/e2e/storage/in_tree_volumes.go +++ b/test/e2e/storage/in_tree_volumes.go @@ -30,7 +30,6 @@ import ( // List of testDrivers to be executed in below loop var testDrivers = []func() storageframework.TestDriver{ drivers.InitNFSDriver, - drivers.InitGlusterFSDriver, drivers.InitISCSIDriver, drivers.InitRbdDriver, drivers.InitCephFSDriver, diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 17a00a89c93..f6a2de8e29e 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -19,7 +19,6 @@ package storage import ( "context" "fmt" - "net" "strings" "time" @@ -51,7 +50,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -680,37 +678,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) }) - ginkgo.Describe("GlusterDynamicProvisioner", func() { - ginkgo.It("should create and delete persistent volumes [fast]", func() { - e2eskipper.SkipIfProviderIs("gke") - ginkgo.By("creating a Gluster DP server Pod") - pod := startGlusterDpServerPod(c, ns) - serverURL := "http://" + net.JoinHostPort(pod.Status.PodIP, "8081") - ginkgo.By("creating a StorageClass") - test := testsuites.StorageClassTest{ - Client: c, - Name: "Gluster Dynamic provisioner test", - Provisioner: "kubernetes.io/glusterfs", - Timeouts: f.Timeouts, - ClaimSize: "2Gi", - ExpectedSize: "2Gi", - Parameters: map[string]string{"resturl": serverURL}, - } - storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "glusterdptest")) - defer clearStorageClass() - test.Class = storageClass - - ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ - ClaimSize: test.ClaimSize, - StorageClassName: &test.Class.Name, - VolumeMode: &test.VolumeMode, - }, ns) - - test.TestDynamicProvisioning() - }) - }) - ginkgo.Describe("Invalid AWS KMS key", func() { ginkgo.It("should report an error and create no PV", func() { e2eskipper.SkipUnlessProviderIs("aws") @@ -880,55 +847,6 @@ func getStorageClass( } } -func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod { - podClient := c.CoreV1().Pods(ns) - - provisionerPod := &v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "glusterdynamic-provisioner-", - }, - - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "glusterdynamic-provisioner", - Image: imageutils.GetE2EImage(imageutils.GlusterDynamicProvisioner), - Args: []string{ - "-config=" + "/etc/heketi/heketi.json", - }, - Ports: []v1.ContainerPort{ - {Name: "heketi", ContainerPort: 8081}, - }, - Env: []v1.EnvVar{ - { - Name: "POD_IP", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - FieldPath: "status.podIP", - }, - }, - }, - }, - ImagePullPolicy: v1.PullIfNotPresent, - }, - }, - }, - } - provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{}) - framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) - - 
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) - - ginkgo.By("locating the provisioner pod") - pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{}) - framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) - return pod -} - // waitForProvisionedVolumesDelete is a polling wrapper to scan all PersistentVolumes for any associated to the test's // StorageClass. Returns either an error and nil values or the remaining PVs and their count. func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) { diff --git a/test/images/volume/gluster/BASEIMAGE b/test/images/volume/gluster/BASEIMAGE deleted file mode 100644 index 9bde7eca43f..00000000000 --- a/test/images/volume/gluster/BASEIMAGE +++ /dev/null @@ -1,4 +0,0 @@ -linux/amd64=fedora:36 -linux/arm64=arm64v8/fedora:36 -linux/ppc64le=ppc64le/fedora:36 -linux/s390x=s390x/fedora:36 diff --git a/test/images/volume/gluster/Dockerfile b/test/images/volume/gluster/Dockerfile deleted file mode 100644 index cbbcb71efb4..00000000000 --- a/test/images/volume/gluster/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG BASEIMAGE -FROM $BASEIMAGE - -CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/ - -RUN yum -y install hostname glusterfs-server && yum clean all -ADD glusterd.vol /etc/glusterfs/ -ADD run_gluster.sh /usr/local/bin/ -ADD index.html /vol/ -RUN chmod 644 /vol/index.html - -EXPOSE 24007/tcp 49152/tcp - -ENTRYPOINT ["/usr/local/bin/run_gluster.sh"] diff --git a/test/images/volume/gluster/README.md b/test/images/volume/gluster/README.md deleted file mode 100644 index 4f888ec4340..00000000000 --- a/test/images/volume/gluster/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Gluster server container for testing - -This container exports test_vol volume with an index.html inside. - -Used by test/e2e/* to test GlusterfsVolumeSource. Not for production use! - diff --git a/test/images/volume/gluster/VERSION b/test/images/volume/gluster/VERSION deleted file mode 100644 index c068b2447cc..00000000000 --- a/test/images/volume/gluster/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.4 diff --git a/test/images/volume/gluster/glusterd.vol b/test/images/volume/gluster/glusterd.vol deleted file mode 100644 index bf304c18432..00000000000 --- a/test/images/volume/gluster/glusterd.vol +++ /dev/null @@ -1,14 +0,0 @@ -# This is default glusterd.vol (incl. commented out base-port), -# with added "rpc-auth-allow-insecure on" to allow connection -# from non-privileged ports. 
- -volume management - type mgmt/glusterd - option working-directory /var/lib/glusterd - option transport-type socket,rdma - option transport.socket.keepalive-time 10 - option transport.socket.keepalive-interval 2 - option transport.socket.read-fail-log off -# option base-port 49152 - option rpc-auth-allow-insecure on -end-volume diff --git a/test/images/volume/gluster/index.html b/test/images/volume/gluster/index.html deleted file mode 100644 index 3c6a6b119a7..00000000000 --- a/test/images/volume/gluster/index.html +++ /dev/null @@ -1 +0,0 @@ -Hello from GlusterFS! diff --git a/test/images/volume/gluster/run_gluster.sh b/test/images/volume/gluster/run_gluster.sh deleted file mode 100755 index 8d9430a0d48..00000000000 --- a/test/images/volume/gluster/run_gluster.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -DIR="$(mktemp -d)" - -function start() -{ - mount -t tmpfs test "$DIR" - chmod 755 "$DIR" - cp /vol/* "$DIR/" - /usr/sbin/glusterd -p /run/glusterd.pid - gluster volume create test_vol "$(hostname -i):$DIR" force - gluster volume start test_vol -} - -function stop() -{ - gluster --mode=script volume stop test_vol force - kill "$(cat /run/glusterd.pid)" - umount "$DIR" - rm -rf "$DIR" - exit 0 -} - - -trap stop TERM - -start "$@" - -while true; do - sleep 5 -done -
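The removed `MinMaxAllocator` (see the deleted `glusterfs_minmax.go` and its tests above) exposed a small GID-allocation API: `NewMinMaxAllocator`, `Allocate`, `AllocateNext`, `Release`, `Free`, and `Has`. A minimal usage sketch follows, assuming the deleted allocator source has been copied into a local package; the package name `gidalloc` and the `Example` function are illustrative only, the method signatures are taken from the deleted code.

```go
package gidalloc // hypothetical package holding a copy of the removed MinMaxAllocator code

import "fmt"

// Example walks the allocator API exercised by the deleted tests above:
// reserve a known GID, take the next free one, then return it.
func Example() error {
	// GID range as a dynamic provisioner would configure it (gidMin..gidMax).
	a, err := NewMinMaxAllocator(2000, 2005)
	if err != nil {
		return fmt.Errorf("creating allocator: %w", err)
	}

	// Reserve a specific GID, e.g. one already recorded on an existing PV.
	if ok, err := a.Allocate(2001); !ok {
		return fmt.Errorf("allocating GID 2001: %w", err)
	}

	// Pick the next free GID for a newly provisioned volume.
	gid, ok, err := a.AllocateNext()
	if !ok {
		return fmt.Errorf("allocating next GID: %w", err)
	}
	fmt.Printf("allocated GID %d, %d still free\n", gid, a.Free())

	// Hand the GID back when the volume is deleted.
	return a.Release(gid)
}
```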
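The deleted `parseClassParameters` test table above also documents the StorageClass parameters the removed provisioner accepted. A sketch of a parameter set mirroring those test cases follows; the key names and values come from the tests, while the surrounding program is illustrative only.

```go
package main

import "fmt"

func main() {
	// StorageClass .parameters accepted by the removed GlusterFS provisioner,
	// mirroring cases from the parseClassParameters tests above.
	params := map[string]string{
		"resturl":            "https://localhost:8080", // heketi REST endpoint (required)
		"restauthenabled":    "false",                  // or supply restuser+restuserkey, or secretname+secretnamespace
		"gidMin":             "4000",                   // defaults to 2000
		"gidMax":             "5000",                   // defaults to 2147483647
		"volumetype":         "replicate:4",            // or "disperse:<data>:<redundancy>"
		"snapfactor":         "50",                     // thin-pool snapshot factor
		"volumenameprefix":   "dept-dev",               // optional volume name prefix (no underscores)
		"customEpNamePrefix": "myprefix",               // endpoint/service name prefix, at most 26 characters
	}

	for k, v := range params {
		fmt.Printf("%s: %s\n", k, v)
	}
}
```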