Merge pull request #90689 from aojea/nfsv6

add ipv6 support to the e2e nfs tests
Kubernetes Prow Robot, 2020-05-21 03:30:36 -07:00 (committed by GitHub)
commit bded41a817
5 changed files with 34 additions and 30 deletions
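The substance of the change is small: e2evolume.NewNFSServer now returns a host string rather than a bare IP, wrapping IPv6 literals in square brackets, and its callers are renamed from serverIP to serverHost. The sketch below is illustrative and not part of the patch (formatNFSTarget is a made-up name); it shows why the brackets matter once the server address is joined with an export path into an NFS mount target.

package main

import (
	"fmt"
	"strings"
)

// formatNFSTarget composes the "server:path" string that an NFS mount
// ultimately consumes. Without brackets, an IPv6 literal such as
// fd00:1234::2 is ambiguous because the address itself contains colons.
func formatNFSTarget(host, path string) string {
	if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
		host = "[" + host + "]"
	}
	return host + ":" + path
}

func main() {
	fmt.Println(formatNFSTarget("10.0.0.5", "/exports"))     // 10.0.0.5:/exports
	fmt.Println(formatNFSTarget("fd00:1234::2", "/exports")) // [fd00:1234::2]:/exports
}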


@@ -78,14 +78,14 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func() {
- config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
+ config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: serverIP,
+ Server: serverHost,
Path: "/",
ReadOnly: true,
},
@@ -102,14 +102,14 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func() {
- config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
+ config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: serverIP,
+ Server: serverHost,
Path: "/exports",
ReadOnly: true,
},
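The two hunks above only rename the variable: whatever NewNFSServer returns is used verbatim as NFSVolumeSource.Server. A self-contained sketch of that contract (the addresses are invented for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nfsVolumeSource mirrors how the tests consume NewNFSServer's result: the
// returned host (a plain IPv4 address, or an IPv6 literal already wrapped in
// brackets) goes straight into the Server field.
func nfsVolumeSource(serverHost, path string, readOnly bool) v1.VolumeSource {
	return v1.VolumeSource{
		NFS: &v1.NFSVolumeSource{
			Server:   serverHost, // e.g. "10.0.0.5" or "[fd00:1234::2]"
			Path:     path,
			ReadOnly: readOnly,
		},
	}
}

func main() {
	vs := nfsVolumeSource("[fd00:1234::2]", "/", true)
	fmt.Printf("%+v\n", *vs.NFS) // {Server:[fd00:1234::2] Path:/ ReadOnly:true}
}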


@@ -44,6 +44,7 @@ import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
@@ -143,7 +144,7 @@ type Test struct {
}
// NewNFSServer is a NFS-specific wrapper for CreateStorageServer.
- func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, ip string) {
+ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
config = TestConfig{
Namespace: namespace,
Prefix: "nfs",
@@ -155,8 +156,11 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
if len(args) > 0 {
config.ServerArgs = args
}
- pod, ip = CreateStorageServer(cs, config)
- return config, pod, ip
+ pod, host = CreateStorageServer(cs, config)
+ if strings.Contains(host, ":") {
+ host = "[" + host + "]"
+ }
+ return config, pod, host
}
// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
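The bracketing above keys off a simple strings.Contains check, which is sufficient here because CreateStorageServer hands back a pod IP. A stricter variant, sketched only as an alternative (not what the patch does), would parse the address and wrap it only when it is genuinely IPv6:

package main

import (
	"fmt"
	"net"
)

// bracketIfIPv6 wraps the host in square brackets only when it parses as an
// IPv6 address, leaving hostnames and IPv4 addresses untouched.
func bracketIfIPv6(host string) string {
	if ip := net.ParseIP(host); ip != nil && ip.To4() == nil {
		return "[" + host + "]"
	}
	return host
}

func main() {
	fmt.Println(bracketIfIPv6("fd00:1234::2")) // [fd00:1234::2]
	fmt.Println(bracketIfIPv6("10.0.0.5"))     // 10.0.0.5
	fmt.Println(bracketIfIPv6("nfs.example"))  // nfs.example
}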


@@ -81,9 +81,9 @@ type nfsDriver struct {
}
type nfsVolume struct {
- serverIP string
- serverPod *v1.Pod
- f *framework.Framework
+ serverHost string
+ serverPod *v1.Pod
+ f *framework.Framework
}
var _ testsuites.TestDriver = &nfsDriver{}
@@ -129,7 +129,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume test
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: nv.serverIP,
+ Server: nv.serverHost,
Path: "/",
ReadOnly: readOnly,
},
@@ -141,7 +141,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: nv.serverIP,
+ Server: nv.serverHost,
Path: "/",
ReadOnly: readOnly,
},
@@ -199,12 +199,12 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
- c, serverPod, serverIP := e2evolume.NewNFSServer(cs, ns.Name, []string{})
+ c, serverPod, serverHost := e2evolume.NewNFSServer(cs, ns.Name, []string{})
config.ServerConfig = &c
return &nfsVolume{
- serverIP: serverIP,
- serverPod: serverPod,
- f: f,
+ serverHost: serverHost,
+ serverPod: serverPod,
+ f: f,
}
case testpatterns.DynamicPV:
// Do nothing
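Storing the host already bracketed keeps every volume-source consumer above unchanged, but any code that needs the raw address back (for example to dial the server directly rather than mount it) must strip the brackets again. A hypothetical helper, not part of this change:

package main

import (
	"fmt"
	"strings"
)

// rawNFSHost undoes the bracketing applied by NewNFSServer so the bare IP
// can be used where an address literal, not a mount target, is expected.
func rawNFSHost(host string) string {
	return strings.TrimSuffix(strings.TrimPrefix(host, "["), "]")
}

func main() {
	fmt.Println(rawNFSHost("[fd00:1234::2]")) // fd00:1234::2
	fmt.Println(rawNFSHost("10.0.0.5"))       // 10.0.0.5
}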


@@ -78,15 +78,15 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
f := framework.NewDefaultFramework("disruptive-pv")
var (
- c clientset.Interface
- ns string
- nfsServerPod *v1.Pod
- nfsPVconfig e2epv.PersistentVolumeConfig
- pvcConfig e2epv.PersistentVolumeClaimConfig
- nfsServerIP, clientNodeIP string
- clientNode *v1.Node
- volLabel labels.Set
- selector *metav1.LabelSelector
+ c clientset.Interface
+ ns string
+ nfsServerPod *v1.Pod
+ nfsPVconfig e2epv.PersistentVolumeConfig
+ pvcConfig e2epv.PersistentVolumeClaimConfig
+ nfsServerHost, clientNodeIP string
+ clientNode *v1.Node
+ volLabel labels.Set
+ selector *metav1.LabelSelector
)
ginkgo.BeforeEach(func() {
@@ -99,13 +99,13 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod.
- _, nfsServerPod, nfsServerIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+ _, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: nfsServerIP,
+ Server: nfsServerHost,
Path: "/exports",
ReadOnly: false,
},


@@ -123,17 +123,17 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
var (
nfsServerPod *v1.Pod
- serverIP string
+ serverHost string
)
ginkgo.BeforeEach(func() {
- _, nfsServerPod, serverIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+ _, nfsServerPod, serverHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
- Server: serverIP,
+ Server: serverHost,
Path: "/exports",
ReadOnly: false,
},