remove GlusterFS references from test/e2e/* comments/descriptions

Signed-off-by: Humble Chirammal <humble.devassy@gmail.com>
Humble Chirammal 2023-05-17 13:08:51 +05:30
parent d83c3ce7ca
commit 36f0caf7ec
8 changed files with 13 additions and 19 deletions

View File

@@ -18,14 +18,14 @@ limitations under the License.
 * This test checks that various VolumeSources are working.
 *
 * There are two ways, how to test the volumes:
-* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+* 1) With containerized server (NFS, Ceph, iSCSI, ...)
 * The test creates a server pod, exporting simple 'index.html' file.
 * Then it uses appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
 * into web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed, exporting
-* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+* various filesystems (ex: NFS) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
@@ -37,9 +37,6 @@ limitations under the License.
 * and checks, that Kubernetes can use it as a volume.
 */
-// GlusterFS test is duplicated from test/e2e/volumes.go. Any changes made there
-// should be duplicated here
 package storage
 import (
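To make the pattern in this header concrete, here is a minimal sketch of the client side it describes: a pod that mounts a volume exported by a hypothetical server pod (reachable at serverIP) into its web-server document root. The image, mount path, and function name are assumptions for illustration, not the e2e framework's actual fixtures.

```go
package volumesketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// clientPod sketches the client side of the pattern described above: it mounts
// an NFS share exported by a hypothetical server pod at serverIP into the web
// server's document root, so the test can fetch the exported index.html.
func clientPod(ns, serverIP string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "volume-client", Namespace: ns},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "client",
				Image: "nginx", // serves /usr/share/nginx/html/index.html
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "test-volume",
					MountPath: "/usr/share/nginx/html",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "test-volume",
				VolumeSource: corev1.VolumeSource{
					NFS: &corev1.NFSVolumeSource{
						Server:   serverIP, // IP of the containerized server pod
						Path:     "/",
						ReadOnly: true,
					},
				},
			}},
		},
	}
}
```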

View File

@@ -78,7 +78,7 @@ type pvcval struct{}
 type PVCMap map[types.NamespacedName]pvcval
 // PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object
-// for varying storage options (NFS, ceph, glusterFS, etc.).
+// for varying storage options (NFS, ceph, etc.).
 // (+optional) prebind holds a pre-bound PVC
 // Example pvSource:
 //
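As an illustration of the "Example pvSource" this comment introduces, the sketch below shows an NFS-backed PersistentVolumeSource built from core API types; the server address and export path are placeholders, not values used by the framework.

```go
package volumesketch

import corev1 "k8s.io/api/core/v1"

// examplePVSource is a hypothetical pvSource of the kind PersistentVolumeConfig
// expects: an NFS-backed PersistentVolumeSource. Server and Path are placeholders.
var examplePVSource = corev1.PersistentVolumeSource{
	NFS: &corev1.NFSVolumeSource{
		Server:   "10.0.0.2", // address of the volume server pod
		Path:     "/exports",
		ReadOnly: false,
	},
}
```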

View File

@@ -18,14 +18,14 @@ limitations under the License.
 * This test checks that various VolumeSources are working.
 *
 * There are two ways, how to test the volumes:
-* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+* 1) With containerized server (NFS, Ceph, iSCSI, ...)
 * The test creates a server pod, exporting simple 'index.html' file.
 * Then it uses appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
-* into web server root and loadind the index.html from it.
+* into web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed, exporting
-* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+* various filesystems (ex: NFS) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
@@ -87,7 +87,7 @@ const (
 VolumeServerPodStartupTimeout = 3 * time.Minute
 // PodCleanupTimeout is a waiting period for pod to be cleaned up and unmount its volumes so we
-// don't tear down containers with NFS/Ceph/Gluster server too early.
+// don't tear down containers with NFS/Ceph server too early.
 PodCleanupTimeout = 20 * time.Second
 )
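PodCleanupTimeout exists so that teardown waits for client pods to unmount their volumes before the backing server pod is removed. The sketch below shows that ordering with a plain client-go clientset; the pod names, namespace, and local constant are illustrative assumptions, and the framework's own helpers handle this with more care.

```go
package volumesketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// podCleanupTimeout mirrors the framework's PodCleanupTimeout constant above.
const podCleanupTimeout = 20 * time.Second

// tearDown deletes the client pod first and waits (up to podCleanupTimeout) for
// it to disappear, so its volumes are unmounted before the server pod backing
// them is removed.
func tearDown(ctx context.Context, cs kubernetes.Interface, ns, clientPod, serverPod string) error {
	if err := cs.CoreV1().Pods(ns).Delete(ctx, clientPod, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	// Best-effort wait: poll until the client pod is gone or the timeout expires.
	_ = wait.PollUntilContextTimeout(ctx, 2*time.Second, podCleanupTimeout, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().Pods(ns).Get(ctx, clientPod, metav1.GetOptions{})
			return apierrors.IsNotFound(err), nil
		})
	return cs.CoreV1().Pods(ns).Delete(ctx, serverPod, metav1.DeleteOptions{})
}
```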

View File

@@ -18,10 +18,10 @@ limitations under the License.
 * This file defines various csi volume test drivers for TestSuites.
 *
 * There are two ways, how to prepare test drivers:
-* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+* 1) With containerized server (NFS, Ceph, iSCSI, ...)
 * It creates a server pod which defines one volume for the tests.
 * These tests work only when privileged containers are allowed, exporting
-* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+* various filesystems (ex: NFS) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not

View File

@@ -18,10 +18,10 @@ limitations under the License.
 * This file defines various in-tree volume test drivers for TestSuites.
 *
 * There are two ways, how to prepare test drivers:
-* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+* 1) With containerized server (NFS, Ceph, iSCSI, ...)
 * It creates a server pod which defines one volume for the tests.
 * These tests work only when privileged containers are allowed, exporting
-* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+* various filesystems (like NFS) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
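The "containerized server" these driver files prepare is a privileged pod that exports the filesystem under test, which is why the comments stress that privileged containers must be allowed. A sketch of such a server pod follows, under assumed names; the image and port are placeholders, not the framework's registered test images.

```go
package volumesketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nfsServerPod sketches the kind of containerized server pod a test driver
// creates: it runs privileged (mounting/exporting needs privileges) and exports
// a directory over NFS for the client pods to consume. The image is a placeholder.
func nfsServerPod(ns string) *corev1.Pod {
	privileged := true
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "nfs-server", Namespace: ns},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "nfs-server",
				Image: "example.test/nfs-server:latest", // placeholder image
				SecurityContext: &corev1.SecurityContext{
					Privileged: &privileged,
				},
				Ports: []corev1.ContainerPort{{
					ContainerPort: 2049,
					Protocol:      corev1.ProtocolTCP,
				}},
			}},
		},
	}
}
```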

View File

@@ -16,9 +16,6 @@ limitations under the License.
 // This test checks that various VolumeSources are working.
-// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
-// test should be made there as well.
 package testsuites
 import (

View File

@@ -638,7 +638,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 ginkgo.By("creating a StorageClass")
 test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "invalid-aws"))
-ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
+ginkgo.By("creating a claim object")
 claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 ClaimSize: test.ClaimSize,
 StorageClassName: &test.Class.Name,
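For orientation, a claim like the one MakePersistentVolumeClaim builds here corresponds roughly to the core-API object below; the size, access mode, and naming are illustrative assumptions rather than the helper's exact output.

```go
package volumesketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClaim approximates what a PersistentVolumeClaimConfig with ClaimSize
// "2Gi" and the given StorageClassName expands to. Values are placeholders.
func exampleClaim(ns, storageClassName string) *corev1.PersistentVolumeClaim {
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pvc-", Namespace: ns},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &storageClassName,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("2Gi"),
				},
			},
		},
	}
}
```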

View File

@@ -210,7 +210,7 @@ sudo chmod o+x /etc/docker
 ```
 A few images have been mirrored from dockerhub into the `gcr.io/k8s-staging-e2e-test-images` registry
-(`busybox`, `glusterdynamic-provisioner`, `httpd`, `httpd-new`, `nginx`, `nginx-new`, `perl`), and they
+(`busybox`, `httpd`, `httpd-new`, `nginx`, `nginx-new`, `perl`), and they
 only have a noop Dockerfile. However, due to an [issue](https://github.com/kubernetes/test-infra/issues/20884),
 the same SHA cannot be pushed twice. A small change to them is required in order to generate a new SHA,
 which can then be pushed and promoted.