Merge pull request #90689 from aojea/nfsv6
add ipv6 support to the e2e nfs tests
commit bded41a817
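
The core of the change is in NewNFSServer: rather than returning the raw pod IP, it now returns a host string in which an IPv6 address is wrapped in square brackets, so callers can keep passing the value straight to NFSVolumeSource.Server. Below is a minimal standalone sketch of that bracketing step; the helper name bracketIfIPv6 and the sample addresses are only for illustration and do not appear in the PR.

package main

import (
    "fmt"
    "strings"
)

// bracketIfIPv6 mirrors the logic added to NewNFSServer: anything containing
// a colon is treated as an IPv6 literal and wrapped in brackets so that a
// later "host:/path" NFS reference stays unambiguous.
func bracketIfIPv6(host string) string {
    if strings.Contains(host, ":") {
        return "[" + host + "]"
    }
    return host
}

func main() {
    fmt.Println(bracketIfIPv6("10.0.0.5"))   // 10.0.0.5
    fmt.Println(bracketIfIPv6("fd00::1234")) // [fd00::1234]
}
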
@@ -78,14 +78,14 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
     ////////////////////////////////////////////////////////////////////////
     ginkgo.Describe("NFSv4", func() {
         ginkgo.It("should be mountable for NFSv4", func() {
-            config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
+            config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
             defer e2evolume.TestServerCleanup(f, config)
 
             tests := []e2evolume.Test{
                 {
                     Volume: v1.VolumeSource{
                         NFS: &v1.NFSVolumeSource{
-                            Server: serverIP,
+                            Server: serverHost,
                             Path: "/",
                             ReadOnly: true,
                         },
@@ -102,14 +102,14 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
 
     ginkgo.Describe("NFSv3", func() {
         ginkgo.It("should be mountable for NFSv3", func() {
-            config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
+            config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
             defer e2evolume.TestServerCleanup(f, config)
 
             tests := []e2evolume.Test{
                 {
                     Volume: v1.VolumeSource{
                         NFS: &v1.NFSVolumeSource{
-                            Server: serverIP,
+                            Server: serverHost,
                             Path: "/exports",
                             ReadOnly: true,
                         },
@@ -44,6 +44,7 @@ import (
     "fmt"
     "path/filepath"
     "strconv"
+    "strings"
     "time"
 
     v1 "k8s.io/api/core/v1"
@@ -143,7 +144,7 @@ type Test struct {
 }
 
 // NewNFSServer is a NFS-specific wrapper for CreateStorageServer.
-func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, ip string) {
+func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
     config = TestConfig{
         Namespace: namespace,
         Prefix: "nfs",
@@ -155,8 +156,11 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
     if len(args) > 0 {
         config.ServerArgs = args
     }
-    pod, ip = CreateStorageServer(cs, config)
-    return config, pod, ip
+    pod, host = CreateStorageServer(cs, config)
+    if strings.Contains(host, ":") {
+        host = "[" + host + "]"
+    }
+    return config, pod, host
 }
 
 // NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
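
Why the brackets matter: the NFS volume plugin eventually joins Server and Path into a single mount source of the form server:/path, and the colons of a bare IPv6 literal would make that string ambiguous. The sketch below illustrates the effect; the exact formatting inside the kubelet's NFS plugin may differ.

package main

import "fmt"

// nfsMountSource mimics how an NFS mount target is assembled from the
// volume's Server and Path fields (illustrative only).
func nfsMountSource(server, path string) string {
    return fmt.Sprintf("%s:%s", server, path)
}

func main() {
    // Unbracketed IPv6: the address colons blend into the path separator.
    fmt.Println(nfsMountSource("fd00::1234", "/exports"))   // fd00::1234:/exports
    // Bracketed host, as returned by the patched NewNFSServer: unambiguous.
    fmt.Println(nfsMountSource("[fd00::1234]", "/exports")) // [fd00::1234]:/exports
}
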
@@ -81,7 +81,7 @@ type nfsDriver struct {
 }
 
 type nfsVolume struct {
-    serverIP string
+    serverHost string
     serverPod *v1.Pod
     f *framework.Framework
 }
@@ -129,7 +129,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume test
     framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
     return &v1.VolumeSource{
         NFS: &v1.NFSVolumeSource{
-            Server: nv.serverIP,
+            Server: nv.serverHost,
             Path: "/",
             ReadOnly: readOnly,
         },
@@ -141,7 +141,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
     framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
     return &v1.PersistentVolumeSource{
         NFS: &v1.NFSVolumeSource{
-            Server: nv.serverIP,
+            Server: nv.serverHost,
             Path: "/",
             ReadOnly: readOnly,
         },
@@ -199,10 +199,10 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
     case testpatterns.InlineVolume:
         fallthrough
     case testpatterns.PreprovisionedPV:
-        c, serverPod, serverIP := e2evolume.NewNFSServer(cs, ns.Name, []string{})
+        c, serverPod, serverHost := e2evolume.NewNFSServer(cs, ns.Name, []string{})
         config.ServerConfig = &c
         return &nfsVolume{
-            serverIP: serverIP,
+            serverHost: serverHost,
             serverPod: serverPod,
             f: f,
         }
@@ -83,7 +83,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
     nfsServerPod *v1.Pod
     nfsPVconfig e2epv.PersistentVolumeConfig
     pvcConfig e2epv.PersistentVolumeClaimConfig
-    nfsServerIP, clientNodeIP string
+    nfsServerHost, clientNodeIP string
     clientNode *v1.Node
     volLabel labels.Set
     selector *metav1.LabelSelector
@@ -99,13 +99,13 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
     volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
     selector = metav1.SetAsLabelSelector(volLabel)
     // Start the NFS server pod.
-    _, nfsServerPod, nfsServerIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+    _, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
     nfsPVconfig = e2epv.PersistentVolumeConfig{
         NamePrefix: "nfs-",
         Labels: volLabel,
         PVSource: v1.PersistentVolumeSource{
             NFS: &v1.NFSVolumeSource{
-                Server: nfsServerIP,
+                Server: nfsServerHost,
                 Path: "/exports",
                 ReadOnly: false,
             },
@@ -123,17 +123,17 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 
     var (
         nfsServerPod *v1.Pod
-        serverIP string
+        serverHost string
     )
 
     ginkgo.BeforeEach(func() {
-        _, nfsServerPod, serverIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+        _, nfsServerPod, serverHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
         pvConfig = e2epv.PersistentVolumeConfig{
             NamePrefix: "nfs-",
             Labels: volLabel,
             PVSource: v1.PersistentVolumeSource{
                 NFS: &v1.NFSVolumeSource{
-                    Server: serverIP,
+                    Server: serverHost,
                     Path: "/exports",
                     ReadOnly: false,
                 },
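
For comparison, Go's net.JoinHostPort applies the same bracketing convention, but only for host:port pairs. An NFS volume is addressed as host:/path rather than host:port, which is presumably why the e2e helper brackets the address itself instead of relying on the standard library. A small sketch:

package main

import (
    "fmt"
    "net"
)

func main() {
    // The standard library brackets IPv6 literals when joining host and port.
    fmt.Println(net.JoinHostPort("fd00::1234", "2049")) // [fd00::1234]:2049

    // There is no equivalent helper for host + export path, so the e2e code
    // adds the brackets itself before the host is combined with a path.
}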