removal of glusterfs in-tree driver code from the source tree

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal 2022-08-24 21:46:19 +05:30
parent 7be35679b8
commit c2a7bfd6b4
21 changed files with 2 additions and 2938 deletions

View File

@@ -14,6 +14,6 @@
FROM ubuntu:xenial
RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12 glusterfs-client=3.7.6-1ubuntu1
RUN apt-get update && apt-get install -y netbase nfs-common=1:1.2.8-9ubuntu12
ENTRYPOINT ["/bin/mount"]

View File

@@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/flexvolume"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/hostpath"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
@@ -78,7 +77,6 @@ func ProbeExpandableVolumePlugins(config persistentvolumeconfig.VolumeConfigurat
if err != nil {
return allPlugins, err
}
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
return allPlugins, nil
}
@@ -118,7 +116,6 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config persiste
klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
}
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
var err error
allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/flexvolume"
"k8s.io/kubernetes/pkg/volume/git_repo"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/hostpath"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
@@ -64,7 +63,6 @@ func ProbeVolumePlugins(featureGate featuregate.FeatureGate) ([]volume.VolumePlu
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)

View File

@@ -45,7 +45,6 @@ import (
"k8s.io/kubernetes/pkg/volume/emptydir"
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/git_repo"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/hostpath"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
@@ -73,7 +72,6 @@ func volumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)

File diff suppressed because it is too large

View File

@@ -1,193 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// This implementation is space-efficient for a sparse
// allocation over a big range. Could be optimized
// for high absolute allocation number with a bitmap.
//
package glusterfs
import (
"errors"
"sync"
)
var (
//ErrConflict returned when value is already in use.
ErrConflict = errors.New("number already allocated")
//ErrInvalidRange returned when the range is invalid, e.g. min > max.
ErrInvalidRange = errors.New("invalid range")
//ErrOutOfRange returned when value is not in pool range.
ErrOutOfRange = errors.New("out of range")
//ErrRangeFull returned when no more free values in the pool.
ErrRangeFull = errors.New("range full")
//ErrInternal returned when no free item found, but a.free != 0.
ErrInternal = errors.New("internal error")
)
// MinMaxAllocator defines allocator struct.
type MinMaxAllocator struct {
lock sync.Mutex
min int
max int
free int
used map[int]bool
}
var _ Rangeable = &MinMaxAllocator{}
// Rangeable is an Interface that can adjust its min/max range.
// Rangeable should be threadsafe
type Rangeable interface {
Allocate(int) (bool, error)
AllocateNext() (int, bool, error)
Release(int) error
Has(int) bool
Free() int
SetRange(min, max int) error
}
// NewMinMaxAllocator returns a new allocator or an error based on the provided min/max values.
func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
if min > max {
return nil, ErrInvalidRange
}
return &MinMaxAllocator{
min: min,
max: max,
free: 1 + max - min,
used: map[int]bool{},
}, nil
}
// SetRange defines the range/pool with provided min and max values.
func (a *MinMaxAllocator) SetRange(min, max int) error {
if min > max {
return ErrInvalidRange
}
a.lock.Lock()
defer a.lock.Unlock()
// Check if we need to change
if a.min == min && a.max == max {
return nil
}
a.min = min
a.max = max
// Recompute how many free we have in the range
numUsed := 0
for i := range a.used {
if a.inRange(i) {
numUsed++
}
}
a.free = 1 + max - min - numUsed
return nil
}
// Allocate allocates the provided value in the allocator and marks it as used.
func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
if !a.inRange(i) {
return false, ErrOutOfRange
}
if a.has(i) {
return false, ErrConflict
}
a.used[i] = true
a.free--
return true, nil
}
// AllocateNext allocates the next free value from the allocator.
func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
// Fast check if we're out of items
if a.free <= 0 {
return 0, false, ErrRangeFull
}
// Scan from the minimum until we find a free item
for i := a.min; i <= a.max; i++ {
if !a.has(i) {
a.used[i] = true
a.free--
return i, true, nil
}
}
// no free item found, but a.free != 0
return 0, false, ErrInternal
}
// Release frees/deletes the provided value from the allocator.
func (a *MinMaxAllocator) Release(i int) error {
a.lock.Lock()
defer a.lock.Unlock()
if !a.has(i) {
return nil
}
delete(a.used, i)
if a.inRange(i) {
a.free++
}
return nil
}
func (a *MinMaxAllocator) has(i int) bool {
_, ok := a.used[i]
return ok
}
// Has checks whether the provided value is used in the allocator.
func (a *MinMaxAllocator) Has(i int) bool {
a.lock.Lock()
defer a.lock.Unlock()
return a.has(i)
}
// Free returns the number of free values in the allocator.
func (a *MinMaxAllocator) Free() int {
a.lock.Lock()
defer a.lock.Unlock()
return a.free
}
func (a *MinMaxAllocator) inRange(i int) bool {
return a.min <= i && i <= a.max
}
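For context, a minimal usage sketch of the allocator removed above (not part of this change; exampleGIDAllocation is an illustrative name, and the 2000/2147483647 bounds mirror the default gidMin/gidMax seen in the provisioner tests below): reserve a GID for a newly provisioned volume, then release it when the volume is deleted.
package glusterfs
import "fmt"
// exampleGIDAllocation exercises the Rangeable API defined above:
// allocate the next free GID from the pool and release it afterwards.
func exampleGIDAllocation() error {
	// Pool of supplemental GIDs handed out to dynamically provisioned volumes.
	gidTable, err := NewMinMaxAllocator(2000, 2147483647)
	if err != nil {
		return err
	}
	// Reserve the next free GID for a new volume.
	gid, ok, err := gidTable.AllocateNext()
	if !ok {
		return fmt.Errorf("allocating GID: %v", err)
	}
	fmt.Printf("allocated GID %d, %d GIDs still free\n", gid, gidTable.Free())
	// Release the GID once the volume is deleted.
	return gidTable.Release(gid)
}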

View File

@@ -1,226 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"testing"
)
func TestNewFree(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if f := m.Free(); f != (max - min + 1) {
t.Errorf("expect to get %d free, but got %d", (max - min + 1), f)
}
}
func TestNewInvalidRange(t *testing.T) {
if _, err := NewMinMaxAllocator(10, 1); err != ErrInvalidRange {
t.Errorf("expect to get Error '%v', got '%v'", ErrInvalidRange, err)
}
}
func TestSetRange(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if err = m.SetRange(10, 1); err != ErrInvalidRange {
t.Errorf("expected to get error '%v', got '%v'", ErrInvalidRange, err)
}
if err = m.SetRange(1, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 2 {
t.Errorf("expect to get %d free, but got %d", 2, f)
}
if ok, _ := m.Allocate(1); !ok {
t.Errorf("error allocate offset %v", 1)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
if err = m.SetRange(1, 1); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get 0 free, but got %d", f)
}
if err = m.SetRange(2, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
}
func TestAllocateNext(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
el, ok, _ := m.AllocateNext()
if !ok {
t.Fatalf("unexpected error")
}
if !m.Has(el) {
t.Errorf("expect element %v allocated", el)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateMax(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
for i := 1; i <= max; i++ {
if _, ok, _ := m.AllocateNext(); !ok {
t.Fatalf("unexpected error")
}
}
if _, ok, _ := m.AllocateNext(); ok {
t.Errorf("unexpected success")
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get %d free, but got %d", 0, f)
}
}
func TestAllocate(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect element %v allocated", offset)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateConflict(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrConflict {
t.Errorf("expected error '%v', got '%v'", ErrConflict, err)
}
}
func TestAllocateOutOfRange(t *testing.T) {
min := 1
max := 10
offset := 11
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrOutOfRange {
t.Errorf("expected error '%v', got '%v'", ErrOutOfRange, err)
}
}
func TestRelease(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect offset %v allocated", offset)
}
if err = m.Release(offset); err != nil {
t.Errorf("unexpected error: %v", err)
}
if m.Has(offset) {
t.Errorf("expect offset %v not allocated", offset)
}
}

View File

@@ -1,764 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"fmt"
"os"
"path/filepath"
"reflect"
"testing"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/glusterfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Glusterfs: &v1.GlusterfsPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, mount.NewFakeMounter(nil))
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
expectedPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~glusterfs/vol1")
if volumePath != expectedPath {
t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
}
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func TestPluginVolume(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
ep := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Namespace: "nsA",
Name: "ep",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
}},
}
client := fake.NewSimpleClientset(pv, claim, ep)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestParseClassParameters(t *testing.T) {
secret := v1.Secret{
Type: "kubernetes.io/glusterfs",
Data: map[string][]byte{
"data": []byte("mypassword"),
},
}
tests := []struct {
name string
parameters map[string]string
secret *v1.Secret
expectError bool
expectConfig *provisionerConfig
}{
{
"password",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
user: "admin",
userKey: "password",
secretValue: "password",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"secret",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"secretname": "mysecret",
"secretnamespace": "default",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
user: "admin",
secretName: "mysecret",
secretNamespace: "default",
secretValue: "mypassword",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"no authentication",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"missing secret",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
"secretnamespace": "default",
},
nil, // secret
true, // expect error
nil,
},
{
"secret with no namespace",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
},
&secret,
true, // expect error
nil,
},
{
"missing url",
map[string]string{
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
{
"unknown parameter",
map[string]string{
"unknown": "yes",
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
{
"invalid gidMin #1",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "0",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin #2",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin #3",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #1",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "0",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #2",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #3",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin:gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "5001",
"gidMax": "5000",
},
&secret,
true, // expect error
nil,
},
{
"valid gidMin",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "5000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 2000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid gidMin:gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumetype: replicate",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumetype: disperse",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid snapfactor: 50",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
"snapfactor": "50",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(50),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumenameprefix: dept-dev",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
"snapfactor": "50",
"volumenameprefix": "dept-dev",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(50),
volumeNamePrefix: "dept-dev",
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"invalid volumetype (disperse) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "disperse:4:asd",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype (replicate) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "replicate:asd",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype: unknown volumetype",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "dispersereplicate:4:2",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype : negative value",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "replicate:-1000",
},
&secret,
true, // expect error
nil,
},
{
"invalid thinPoolSnapFactor: value out of range",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"snapfactor": "0.5",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumenameprefix: string starting with '_'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumenameprefix": "_",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumenameprefix: string with '_'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumenameprefix": "qe_dept",
},
&secret,
true, // expect error
nil,
},
{
"invalid thinPoolSnapFactor: value out of range",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"snapfactor": "120",
},
&secret,
true, // expect error
nil,
},
{
"enable custom ep/svc name: customEpNamePrefix: myprefix",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "myprefix",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "myprefix",
},
},
{
"empty custom ep/svc name: customEpNamePrefix:''",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "",
},
},
{
"custom ep/svc name with 26 chars: customEpNamePrefix:'charstringhastwentysixchar'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "charstringhastwentysixchar",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "charstringhastwentysixchar",
},
},
{
"invalid customepnameprefix ( ie >26 chars) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "myprefixhasmorethan26characters",
},
&secret,
true, // expect error
nil,
},
}
for _, test := range tests {
client := &fake.Clientset{}
client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if test.secret != nil {
return true, test.secret, nil
}
return true, nil, fmt.Errorf("test %s did not set a secret", test.name)
})
cfg, err := parseClassParameters(test.parameters, client)
if err != nil && !test.expectError {
t.Errorf("Test %s got unexpected error %v", test.name, err)
}
if err == nil && test.expectError {
t.Errorf("test %s expected error and got none", test.name)
}
if test.expectConfig != nil {
if !reflect.DeepEqual(cfg, test.expectConfig) {
t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg)
}
}
}
}
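The parameter keys exercised by these cases are the ones cluster admins set on a glusterfs StorageClass. As a rough sketch of the object parseClassParameters consumed (the class name "slow" and the package name are illustrative; the parameter values are taken from the test cases above):
package example
import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// exampleGlusterStorageClass builds a StorageClass for the in-tree
// provisioner using the same parameter keys parsed in TestParseClassParameters.
func exampleGlusterStorageClass() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "slow"},
		Provisioner: "kubernetes.io/glusterfs",
		Parameters: map[string]string{
			"resturl":         "https://localhost:8080",
			"restuser":        "admin",
			"secretname":      "mysecret",
			"secretnamespace": "default",
			"gidMin":          "4000",
			"gidMax":          "5000",
			"volumetype":      "replicate:4",
		},
	}
}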

View File

@@ -1,71 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"bufio"
"fmt"
"os"
"k8s.io/klog/v2"
)
// readGlusterLog will take the last 2 lines of the log file
// on failure of gluster SetUp and return those so kubelet can
// properly expose them
// return error on any failure
func readGlusterLog(path string, podName string) error {
var line1 string
var line2 string
linecount := 0
klog.Infof("failure, now attempting to read the gluster log for pod %s", podName)
// Check and make sure path exists
if len(path) == 0 {
return fmt.Errorf("log file does not exist for pod %s", podName)
}
// open the log file
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("could not open log file for pod %s", podName)
}
defer file.Close()
// read in and scan the file using scanner
// from stdlib
fscan := bufio.NewScanner(file)
// rather than guessing on bytes or using Seek
// going to scan entire file and take the last two lines
// generally the file should be small since it is pod specific
for fscan.Scan() {
if linecount > 0 {
line1 = line2
}
line2 = "\n" + fscan.Text()
linecount++
}
if linecount > 0 {
return fmt.Errorf("%v", line1+line2+"\n")
}
return nil
}
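A hypothetical sketch of how readGlusterLog was meant to be consumed (surfaceMountFailure, mountErr and logFilePath are illustrative names, not identifiers from the plugin): when the FUSE mount fails, the tail of the per-pod gluster log is folded into the error that kubelet reports.
package glusterfs
import "fmt"
// surfaceMountFailure wraps a failed mount attempt with the last lines of
// the pod's gluster log, as read by readGlusterLog above.
func surfaceMountFailure(mountErr error, logFilePath, podName string) error {
	if mountErr == nil {
		return nil
	}
	if logErr := readGlusterLog(logFilePath, podName); logErr != nil {
		return fmt.Errorf("mount failed: %v; glusterfs log tail: %v", mountErr, logErr)
	}
	return mountErr
}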

View File

@@ -43,18 +43,14 @@ limitations under the License.
package storage
import (
"context"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
admissionapi "k8s.io/pod-security-admission/api"
)
// TODO(#99468): Check if these tests are still needed.
@@ -123,39 +119,4 @@ var _ = SIGDescribe("Volumes", func() {
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
ginkgo.Describe("GlusterFS", func() {
ginkgo.It("should be mountable", func() {
// create gluster server and endpoints
config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name)
name := config.Prefix + "-server"
defer func() {
e2evolume.TestServerCleanup(f, config)
err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
}
}()
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
})

View File

@@ -166,65 +166,6 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
return config, pod, host
}
// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
config = TestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
Port: 24007,
},
},
},
}
_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service for Gluster server")
ginkgo.By("creating Gluster endpoints")
endpoints := &v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: ip,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
return config, pod, ip
}
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.

View File

@@ -46,7 +46,6 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
@@ -219,143 +218,6 @@ func (v *nfsVolume) DeleteVolume() {
cleanUpVolumeServer(v.f, v.serverPod)
}
// Gluster
type glusterFSDriver struct {
driverInfo storageframework.DriverInfo
}
type glusterVolume struct {
prefix string
serverPod *v1.Pod
f *framework.Framework
}
var _ storageframework.TestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.InlineVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &glusterFSDriver{}
// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
func InitGlusterFSDriver() storageframework.TestDriver {
return &glusterFSDriver{
driverInfo: storageframework.DriverInfo{
Name: "gluster",
InTreePluginName: "kubernetes.io/glusterfs",
MaxFileSize: storageframework.FileSizeMedium,
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Gi",
},
SupportedFsType: sets.NewString(
"", // Default fsType
),
Capabilities: map[storageframework.Capability]bool{
storageframework.CapPersistence: true,
storageframework.CapExec: true,
storageframework.CapRWX: true,
storageframework.CapMultiPODs: true,
},
},
}
}
func (g *glusterFSDriver) GetDriverInfo() *storageframework.DriverInfo {
return &g.driverInfo
}
func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
}
func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
gv, ok := e2evolume.(*glusterVolume)
if !ok {
framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume)
}
name := gv.prefix + "-server"
return &v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: readOnly,
},
}
}
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := e2evolume.(*glusterVolume)
if !ok {
framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume)
}
name := gv.prefix + "-server"
return &v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: readOnly,
},
}, nil
}
func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
return &storageframework.PerTestConfig{
Driver: g,
Prefix: "gluster",
Framework: f,
}, func() {}
}
func (g *glusterFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name)
config.ServerConfig = &c
return &glusterVolume{
prefix: config.Prefix,
serverPod: serverPod,
f: f,
}
}
func (v *glusterVolume) DeleteVolume() {
f := v.f
cs := f.ClientSet
ns := f.Namespace
name := v.prefix + "-server"
nameSpaceName := fmt.Sprintf("%s/%s", ns.Name, name)
framework.Logf("Deleting Gluster endpoints %s...", nameSpaceName)
err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
framework.Failf("Gluster deleting endpoint %s failed: %v", nameSpaceName, err)
}
framework.Logf("Gluster endpoints %q not found, assuming deleted", nameSpaceName)
}
framework.Logf("Deleting Gluster service %s...", nameSpaceName)
err = cs.CoreV1().Services(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
framework.Failf("Gluster deleting service %s failed: %v", nameSpaceName, err)
}
framework.Logf("Gluster service %q not found, assuming deleted", nameSpaceName)
}
framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
err = e2epod.DeletePodWithWait(cs, v.serverPod)
if err != nil {
framework.Failf("Gluster server pod delete failed: %v", err)
}
}
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
type iSCSIDriver struct {

View File

@@ -30,7 +30,6 @@ import (
// List of testDrivers to be executed in below loop
var testDrivers = []func() storageframework.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
drivers.InitRbdDriver,
drivers.InitCephFSDriver,

View File

@@ -19,7 +19,6 @@ package storage
import (
"context"
"fmt"
"net"
"strings"
"time"
@@ -51,7 +50,6 @@ import (
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@@ -680,37 +678,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
})
})
ginkgo.Describe("GlusterDynamicProvisioner", func() {
ginkgo.It("should create and delete persistent volumes [fast]", func() {
e2eskipper.SkipIfProviderIs("gke")
ginkgo.By("creating a Gluster DP server Pod")
pod := startGlusterDpServerPod(c, ns)
serverURL := "http://" + net.JoinHostPort(pod.Status.PodIP, "8081")
ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{
Client: c,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverURL},
}
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "glusterdptest"))
defer clearStorageClass()
test.Class = storageClass
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
}, ns)
test.TestDynamicProvisioning()
})
})
ginkgo.Describe("Invalid AWS KMS key", func() {
ginkgo.It("should report an error and create no PV", func() {
e2eskipper.SkipUnlessProviderIs("aws")
@@ -880,55 +847,6 @@ func getStorageClass(
}
}
func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "glusterdynamic-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "glusterdynamic-provisioner",
Image: imageutils.GetE2EImage(imageutils.GlusterDynamicProvisioner),
Args: []string{
"-config=" + "/etc/heketi/heketi.json",
},
Ports: []v1.ContainerPort{
{Name: "heketi", ContainerPort: 8081},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
// waitForProvisionedVolumesDelete is a polling wrapper to scan all PersistentVolumes for any associated to the test's
// StorageClass. Returns either an error and nil values or the remaining PVs and their count.
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {

View File

@@ -1,4 +0,0 @@
linux/amd64=fedora:36
linux/arm64=arm64v8/fedora:36
linux/ppc64le=ppc64le/fedora:36
linux/s390x=s390x/fedora:36

View File

@@ -1,28 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG BASEIMAGE
FROM $BASEIMAGE
CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/
RUN yum -y install hostname glusterfs-server && yum clean all
ADD glusterd.vol /etc/glusterfs/
ADD run_gluster.sh /usr/local/bin/
ADD index.html /vol/
RUN chmod 644 /vol/index.html
EXPOSE 24007/tcp 49152/tcp
ENTRYPOINT ["/usr/local/bin/run_gluster.sh"]

View File

@@ -1,6 +0,0 @@
# Gluster server container for testing
This container exports test_vol volume with an index.html inside.
Used by test/e2e/* to test GlusterfsVolumeSource. Not for production use!
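For reference, a sketch of the inline volume source the e2e tests mount from this container (testVolSource and the package name are illustrative; "gluster-server" follows the Prefix + "-server" naming used by NewGlusterfsServer, and test_vol is the volume exported by run_gluster.sh):
package example
import v1 "k8s.io/api/core/v1"
// testVolSource builds the inline GlusterFS volume source pointing at the
// test_vol volume this container exports.
func testVolSource(readOnly bool) v1.VolumeSource {
	return v1.VolumeSource{
		Glusterfs: &v1.GlusterfsVolumeSource{
			EndpointsName: "gluster-server",
			Path:          "test_vol",
			ReadOnly:      readOnly,
		},
	}
}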

View File

@@ -1 +0,0 @@
1.4

View File

@@ -1,14 +0,0 @@
# This is default glusterd.vol (incl. commented out base-port),
# with added "rpc-auth-allow-insecure on" to allow connection
# from non-privileged ports.
volume management
type mgmt/glusterd
option working-directory /var/lib/glusterd
option transport-type socket,rdma
option transport.socket.keepalive-time 10
option transport.socket.keepalive-interval 2
option transport.socket.read-fail-log off
# option base-port 49152
option rpc-auth-allow-insecure on
end-volume

View File

@@ -1 +0,0 @@
Hello from GlusterFS!

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DIR="$(mktemp -d)"
function start()
{
mount -t tmpfs test "$DIR"
chmod 755 "$DIR"
cp /vol/* "$DIR/"
/usr/sbin/glusterd -p /run/glusterd.pid
gluster volume create test_vol "$(hostname -i):$DIR" force
gluster volume start test_vol
}
function stop()
{
gluster --mode=script volume stop test_vol force
kill "$(cat /run/glusterd.pid)"
umount "$DIR"
rm -rf "$DIR"
exit 0
}
trap stop TERM
start "$@"
while true; do
sleep 5
done