From 7d5305718d382b9c981296c044ddb5563d5adf9f Mon Sep 17 00:00:00 2001 From: Jon Cope Date: Fri, 16 Dec 2016 11:43:20 -0600 Subject: [PATCH 1/4] Add PV E2E: disruptive environment tests Tweaked 'delete pod during stopped kubelet' test to check for mount Refactor start, stop, restart funcs into 1 func --- test/e2e/persistent_volumes-disruptive.go | 263 ++++++++++++++++++++++ 1 file changed, 263 insertions(+) create mode 100644 test/e2e/persistent_volumes-disruptive.go diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go new file mode 100644 index 00000000000..2f69a240475 --- /dev/null +++ b/test/e2e/persistent_volumes-disruptive.go @@ -0,0 +1,263 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This test references +// persistent_volumes.go +// volumes.go + +package e2e + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/test/e2e/framework" + + "strings" +) + +type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) +type disruptiveTest struct { + testItStmt string + runTest testBody +} +type kubeletOpt string + +const ( + MIN_NODES = 2 + NODE_STATE_WAIT = 2 * time.Minute + kStart kubeletOpt = "start" + kStop kubeletOpt = "stop" + kRestart kubeletOpt = "restart" +) + +var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { + + f := framework.NewDefaultFramework("disruptive-pv") + var ( + c clientset.Interface + ns string + nfsServerPod *v1.Pod + nfsPVconfig persistentVolumeConfig + nfsServerIP, clientNodeIP string + clientNode *v1.Node + ) + + nfsServerConfig := VolumeTestConfig{ + namespace: v1.NamespaceDefault, + prefix: "nfs", + serverImage: "gcr.io/google_containers/volume-nfs:0.7", + serverPorts: []int{2049}, + serverArgs: []string{"-G", "777", "/exports"}, + } + + BeforeEach(func() { + // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. + framework.SkipUnlessNodeCountIsAtLeast(MIN_NODES) + c = f.ClientSet + ns = f.Namespace.Name + + // Start the NFS server pod. + if nfsServerPod == nil { + framework.Logf("[BeforeEach] Initializing NFS Server Pod") + nfsServerPod = startVolumeServer(c, nfsServerConfig) + + framework.Logf("[BeforeEach] Configuring PersistentVolume") + nfsServerIP = nfsServerPod.Status.PodIP + nfsPVconfig = persistentVolumeConfig{ + namePrefix: "nfs-", + pvSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + Server: nfsServerIP, + Path: "/exports", + ReadOnly: false, + }, + }, + } + } + // Get the first ready node IP that is not hosting the NFS pod. 
+ if clientNodeIP == "" { + framework.Logf("Designating test node") + nodes := framework.GetReadySchedulableNodesOrDie(c) + for _, node := range nodes.Items { + if node.Name != nfsServerPod.Spec.NodeName { + clientNode = &node + break + } + } + } + }) + + AddCleanupAction(func() { + if nfsServerPod != nil && c != nil { + By("Deleting NFS server pod") + nfsServerPodCleanup(c, nfsServerConfig) + nfsServerPod = nil + } + }) + + Context("when kubelet restarts", func() { + + var ( + clientPod *v1.Pod + pv *v1.PersistentVolume + pvc *v1.PersistentVolumeClaim + ) + + BeforeEach(func() { + framework.Logf("Initializing test spec") + clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, ns, clientNode.Name) + }) + + AfterEach(func() { + framework.Logf("Tearing down test spec") + tearDownTestCase(c, f, ns, clientPod, pvc, pv) + }) + + // Test table housing the It() title string and test spec. runTest is type testBody, defined at + // the start of this file. To add tests, define a function mirroring the testBody signature and assign + // to runTest. + disruptiveTestTable := []disruptiveTest{ + { + testItStmt: "Should test that a file written to the mount before kubelet restart is stat-able after restart.", + runTest: testKubeletRestartsAndRestoresMount, + }, + { + testItStmt: "Should test that a volume mount to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.", + runTest: testVolumeUnmountsFromDeletedPod, + }, + } + + // Test loop executes each disruptiveTest iteratively. + for _, test := range disruptiveTestTable { + func(t disruptiveTest) { + It(t.testItStmt+" [Disruptive]", func() { + By("Executing Spec") + t.runTest(c, f, clientPod, pvc, pv) + }) + }(test) + } + }) +}) + +// SPEC: Test that a volume mounted to a pod remains mounted after a kubelet restarts +func testKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + file := "/mnt/_SUCCESS" + _, err := podExec(clientPod, "touch "+file) + Expect(err).NotTo(HaveOccurred()) + + kubeletCommand(kRestart, c, clientPod) + + _, err = podExec(clientPod, "cat "+file) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Pod %s detected %s after kubelet restart", clientPod.Name, file) +} + +// SPEC: Test that a volume unmounts if the client pod was deleted while the kubelet was down. +func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + + nodeIP, err := framework.GetHostExternalAddress(c, clientPod) + nodeIP = nodeIP + ":22" + Expect(err).NotTo(HaveOccurred()) + + result, err := framework.SSH("mount", nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred()) + if strings.Contains(result.Stdout, string(clientPod.UID)) { + framework.Logf("Sanity Check: Client UID %s found in `mount` output", clientPod.UID) + } else { + framework.Failf("Sanity Check: Client UID %s NOT found in `mount` output prior to pod deletion. 
Something has gone wrong.", clientPod.UID) + } + + file := "/mnt/_SUCCESS" + _, err = podExec(clientPod, "touch "+file) + Expect(err).NotTo(HaveOccurred()) + + kubeletCommand(kStop, c, clientPod) + deletePod(f, c, clientPod.Namespace, clientPod) + kubeletCommand(kStart, c, clientPod) + + result, err = framework.SSH("mount", nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred()) + if strings.Contains(result.Stdout, string(clientPod.UID)) { + framework.Failf("Client UID %s found in `mount` output. Volume failed to unmount.", clientPod.UID) + } else { + framework.Logf("Client UID %s not found in `mount` output. Volume has unmounted.", clientPod.UID) + } +} + +// Initialize a test spec with a pv, pvc, and nfs client pod +func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { + pv, pvc = createPVPVC(c, pvConfig, ns, false) + pod = makePod(ns, pvc.Name) + pod.Spec.NodeName = nodeName + framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName) + pod, err := c.Core().Pods(ns).Create(pod) + Expect(err).NotTo(HaveOccurred()) + err = framework.WaitForPodRunningInNamespace(c, pod) + Expect(err).NotTo(HaveOccurred()) + + pod, err = c.Core().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return +} + +// Post-spec clean up +func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, pod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + deletePod(f, c, ns, pod) + deletePersistentVolumeClaim(c, pvc.Name, ns) + deletePersistentVolume(c, pv.Name) +} + +// Start, Restart, or Stop the kubelet running on the node of the target pod. +// Allowed kubeltOps are `kStart`, `kStop`, and `kRestart` +func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { + nodeIP, err := framework.GetHostExternalAddress(c, pod) + Expect(err).NotTo(HaveOccurred()) + nodeIP = nodeIP + ":22" + sshResult, err := framework.SSH("/etc/init.d/kubelet "+string(kOp), nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred()) + framework.LogSSHResult(sshResult) + + // On restart, waiting for node NotReady prevents a race condition where the node takes a few moments to leave the + // Ready state which in turn short circuits WaitForNodeToBeReady() + if kOp == kStop || kOp == kRestart { + if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NODE_STATE_WAIT); !ok { + framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + } + } + if kOp == kStart || kOp == kRestart { + if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NODE_STATE_WAIT); !ok { + framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + } + } + +} + +// Wraps RunKubectl to execute a bash cmd in target pod +func podExec(pod *v1.Pod, bashExec string) (string, error) { + args := strings.Split(bashExec, " ") + cmd := []string{"exec", "--namespace=" + pod.Namespace, pod.Name} + cmd = append(cmd, args...) + return framework.RunKubectl(cmd...) 
+} From a2b7193a095b8bd6a1b6006d01741dbe301de923 Mon Sep 17 00:00:00 2001 From: Jon Cope Date: Fri, 16 Dec 2016 17:43:45 -0600 Subject: [PATCH 2/4] minor fixes: changed string.Contains to mount | grep, nil pod, pv, pvc in AfterEach, set clientNodeIP in BeforeEach --- test/e2e/BUILD | 1 + test/e2e/persistent_volumes-disruptive.go | 38 ++++++++++------------- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 4e7146d1da8..dc484c27e98 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -80,6 +80,7 @@ go_library( "opaque_resource.go", "pd.go", "persistent_volumes.go", + "persistent_volumes-disruptive.go", "petset.go", "pod_gc.go", "pods.go", diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go index 2f69a240475..d9c0ff8164f 100644 --- a/test/e2e/persistent_volumes-disruptive.go +++ b/test/e2e/persistent_volumes-disruptive.go @@ -21,6 +21,7 @@ limitations under the License. package e2e import ( + "strings" "time" . "github.com/onsi/ginkgo" @@ -29,8 +30,6 @@ import ( metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" - - "strings" ) type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) @@ -42,7 +41,7 @@ type kubeletOpt string const ( MIN_NODES = 2 - NODE_STATE_WAIT = 2 * time.Minute + NODE_STATE_WAIT = 1 * time.Minute kStart kubeletOpt = "start" kStop kubeletOpt = "stop" kRestart kubeletOpt = "restart" @@ -81,6 +80,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { framework.Logf("[BeforeEach] Configuring PersistentVolume") nfsServerIP = nfsServerPod.Status.PodIP + Expect(nfsServerIP).NotTo(BeEmpty()) nfsPVconfig = persistentVolumeConfig{ namePrefix: "nfs-", pvSource: v1.PersistentVolumeSource{ @@ -99,6 +99,8 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { for _, node := range nodes.Items { if node.Name != nfsServerPod.Spec.NodeName { clientNode = &node + clientNodeIP = framework.GetNodeExternalIP(clientNode) + Expect(clientNodeIP).NotTo(BeEmpty()) break } } @@ -129,6 +131,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { AfterEach(func() { framework.Logf("Tearing down test spec") tearDownTestCase(c, f, ns, clientPod, pvc, pv) + pv, pvc, clientPod = nil, nil, nil }) // Test table housing the It() title string and test spec. runTest is type testBody, defined at @@ -174,16 +177,12 @@ func testKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { nodeIP, err := framework.GetHostExternalAddress(c, clientPod) + Expect(err).NotTo(HaveOccurred()) nodeIP = nodeIP + ":22" - Expect(err).NotTo(HaveOccurred()) - result, err := framework.SSH("mount", nodeIP, framework.TestContext.Provider) + result, err := framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) - if strings.Contains(result.Stdout, string(clientPod.UID)) { - framework.Logf("Sanity Check: Client UID %s found in `mount` output", clientPod.UID) - } else { - framework.Failf("Sanity Check: Client UID %s NOT found in `mount` output prior to pod deletion. 
Something has gone wrong.", clientPod.UID) - } + Expect(result.Code).To(BeZero()) file := "/mnt/_SUCCESS" _, err = podExec(clientPod, "touch "+file) @@ -193,19 +192,15 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew deletePod(f, c, clientPod.Namespace, clientPod) kubeletCommand(kStart, c, clientPod) - result, err = framework.SSH("mount", nodeIP, framework.TestContext.Provider) + result, err = framework.SSH("mount| grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) - if strings.Contains(result.Stdout, string(clientPod.UID)) { - framework.Failf("Client UID %s found in `mount` output. Volume failed to unmount.", clientPod.UID) - } else { - framework.Logf("Client UID %s not found in `mount` output. Volume has unmounted.", clientPod.UID) - } + Expect(result.Code).NotTo(BeZero()) } // Initialize a test spec with a pv, pvc, and nfs client pod -func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { - pv, pvc = createPVPVC(c, pvConfig, ns, false) - pod = makePod(ns, pvc.Name) +func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { + pv, pvc := createPVPVC(c, pvConfig, ns, false) + pod := makePod(ns, pvc.Name) pod.Spec.NodeName = nodeName framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName) pod, err := c.Core().Pods(ns).Create(pod) @@ -219,7 +214,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persis Expect(err).NotTo(HaveOccurred()) pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - return + return pod, pv, pvc } // Post-spec clean up @@ -248,10 +243,9 @@ func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { } if kOp == kStart || kOp == kRestart { if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NODE_STATE_WAIT); !ok { - framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) } } - } // Wraps RunKubectl to execute a bash cmd in target pod From 30a296be827f7a9a4c92f46b19600d7884807b65 Mon Sep 17 00:00:00 2001 From: Jon Cope Date: Mon, 19 Dec 2016 12:19:22 -0600 Subject: [PATCH 3/4] camel case constants, tweak function documentation --- test/e2e/persistent_volumes-disruptive.go | 45 ++++++++++++----------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go index d9c0ff8164f..605a19bae89 100644 --- a/test/e2e/persistent_volumes-disruptive.go +++ b/test/e2e/persistent_volumes-disruptive.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -40,11 +40,11 @@ type disruptiveTest struct { type kubeletOpt string const ( - MIN_NODES = 2 - NODE_STATE_WAIT = 1 * time.Minute - kStart kubeletOpt = "start" - kStop kubeletOpt = "stop" - kRestart kubeletOpt = "restart" + MinNodes = 2 + NodeStateTimeout = 1 * time.Minute + kStart kubeletOpt = "start" + kStop kubeletOpt = "stop" + kRestart kubeletOpt = "restart" ) var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { BeforeEach(func() { // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. - framework.SkipUnlessNodeCountIsAtLeast(MIN_NODES) + framework.SkipUnlessNodeCountIsAtLeast(MinNodes) c = f.ClientSet ns = f.Namespace.Name @@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { runTest: testKubeletRestartsAndRestoresMount, }, { - testItStmt: "Should test that a volume mount to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.", + testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.", runTest: testVolumeUnmountsFromDeletedPod, }, } @@ -160,44 +160,47 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { }) }) -// SPEC: Test that a volume mounted to a pod remains mounted after a kubelet restarts +// testKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts func testKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + By("Writing to the volume.") file := "/mnt/_SUCCESS" _, err := podExec(clientPod, "touch "+file) Expect(err).NotTo(HaveOccurred()) + By("Restarting kubelet") kubeletCommand(kRestart, c, clientPod) + By("Testing that written file is accessible.") _, err = podExec(clientPod, "cat "+file) Expect(err).NotTo(HaveOccurred()) framework.Logf("Pod %s detected %s after kubelet restart", clientPod.Name, file) } -// SPEC: Test that a volume unmounts if the client pod was deleted while the kubelet was down. +// testVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down. 
func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { - nodeIP, err := framework.GetHostExternalAddress(c, clientPod) Expect(err).NotTo(HaveOccurred()) nodeIP = nodeIP + ":22" + By("Expecting the volume mount to be found.") result, err := framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) Expect(result.Code).To(BeZero()) - file := "/mnt/_SUCCESS" - _, err = podExec(clientPod, "touch "+file) - Expect(err).NotTo(HaveOccurred()) - + By("Restarting the kubelet.") kubeletCommand(kStop, c, clientPod) deletePod(f, c, clientPod.Namespace, clientPod) kubeletCommand(kStart, c, clientPod) + By("Expecting the volume mount not to be found.") result, err = framework.SSH("mount| grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) Expect(result.Code).NotTo(BeZero()) + + framework.Logf("Volume mount detected on pod and written file is readable post-restart.") } -// Initialize a test spec with a pv, pvc, and nfs client pod +// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { pv, pvc := createPVPVC(c, pvConfig, ns, false) pod := makePod(ns, pvc.Name) @@ -217,14 +220,14 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persis return pod, pv, pvc } -// Post-spec clean up +// tearDownTestCase destroy resources created by initTestCase. func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, pod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { deletePod(f, c, ns, pod) deletePersistentVolumeClaim(c, pvc.Name, ns) deletePersistentVolume(c, pv.Name) } -// Start, Restart, or Stop the kubelet running on the node of the target pod. +// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod. 
// Allowed kubeltOps are `kStart`, `kStop`, and `kRestart` func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { nodeIP, err := framework.GetHostExternalAddress(c, pod) @@ -237,18 +240,18 @@ func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { // On restart, waiting for node NotReady prevents a race condition where the node takes a few moments to leave the // Ready state which in turn short circuits WaitForNodeToBeReady() if kOp == kStop || kOp == kRestart { - if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NODE_STATE_WAIT); !ok { + if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) } } if kOp == kStart || kOp == kRestart { - if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NODE_STATE_WAIT); !ok { + if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) } } } -// Wraps RunKubectl to execute a bash cmd in target pod +// podExec wraps RunKubectl to execute a bash cmd in target pod func podExec(pod *v1.Pod, bashExec string) (string, error) { args := strings.Split(bashExec, " ") cmd := []string{"exec", "--namespace=" + pod.Namespace, pod.Name} From da604ad9fac4f016ae754fe238ead7d1fb626891 Mon Sep 17 00:00:00 2001 From: Jon Cope Date: Tue, 20 Dec 2016 13:55:12 -0600 Subject: [PATCH 4/4] Made server images paths constant, removed redundant import alias --- test/e2e/persistent_volumes-disruptive.go | 13 +++++-------- test/e2e/persistent_volumes.go | 4 ++-- test/e2e/volumes.go | 21 +++++++++++++++------ 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go index 605a19bae89..8bbe5aa2af1 100644 --- a/test/e2e/persistent_volumes-disruptive.go +++ b/test/e2e/persistent_volumes-disruptive.go @@ -21,7 +21,7 @@ limitations under the License. package e2e import ( - "strings" + "fmt" "time" . "github.com/onsi/ginkgo" @@ -47,7 +47,7 @@ const ( kRestart kubeletOpt = "restart" ) -var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { +var _ = framework.KubeDescribe("PersistentVolumes [Disruptive]", func() { f := framework.NewDefaultFramework("disruptive-pv") var ( @@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { nfsServerConfig := VolumeTestConfig{ namespace: v1.NamespaceDefault, prefix: "nfs", - serverImage: "gcr.io/google_containers/volume-nfs:0.7", + serverImage: NfsServerImage, serverPorts: []int{2049}, serverArgs: []string{"-G", "777", "/exports"}, } @@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:Disruptive", func() { // Test loop executes each disruptiveTest iteratively. for _, test := range disruptiveTestTable { func(t disruptiveTest) { - It(t.testItStmt+" [Disruptive]", func() { + It(t.testItStmt, func() { By("Executing Spec") t.runTest(c, f, clientPod, pvc, pv) }) @@ -253,8 +253,5 @@ func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { // podExec wraps RunKubectl to execute a bash cmd in target pod func podExec(pod *v1.Pod, bashExec string) (string, error) { - args := strings.Split(bashExec, " ") - cmd := []string{"exec", "--namespace=" + pod.Namespace, pod.Name} - cmd = append(cmd, args...) - return framework.RunKubectl(cmd...) 
+ return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec) } diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go index e2e8a03c33f..fbf5d97e1ef 100644 --- a/test/e2e/persistent_volumes.go +++ b/test/e2e/persistent_volumes.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/test/e2e/framework" @@ -509,7 +509,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { NFSconfig = VolumeTestConfig{ namespace: v1.NamespaceDefault, prefix: "nfs", - serverImage: "gcr.io/google_containers/volume-nfs:0.7", + serverImage: NfsServerImage, serverPorts: []int{2049}, serverArgs: []string{"-G", "777", "/exports"}, } diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go index 3119b3d52d0..a59e132fc2d 100644 --- a/test/e2e/volumes.go +++ b/test/e2e/volumes.go @@ -49,7 +49,7 @@ import ( apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/v1" metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" "github.com/golang/glog" @@ -83,6 +83,15 @@ type VolumeTest struct { expectedContent string } +// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage +const ( + NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8" + IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1" + GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2" + CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1" + RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1" +) + // Starts a container specified by config.serverImage and exports all // config.serverPorts from it. The returned pod should be used to get the server // IP address and create appropriate VolumeSource. 
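Note on the const block added to volumes.go above: the server image paths were previously hard-coded per test (the disruptive and PV specs earlier in this series used gcr.io/google_containers/volume-nfs:0.7 directly), so a tag bump such as NFS 0.7 -> 0.8 now happens in one place. Below is a minimal sketch of how a spec could consume the constant, assuming it lives in the same e2e package as VolumeTestConfig and startVolumeServer; the helper name and namespace argument are illustrative and not part of the patch.

    // newNFSTestConfig is a hypothetical helper showing how a test wires the shared
    // image constant into the existing VolumeTestConfig struct.
    func newNFSTestConfig(ns string) VolumeTestConfig {
    	return VolumeTestConfig{
    		namespace:   ns,
    		prefix:      "nfs",
    		serverImage: NfsServerImage, // gcr.io/google_containers/volume-nfs:0.8
    		serverPorts: []int{2049},
    		serverArgs:  []string{"-G", "777", "/exports"},
    	}
    }

    // Usage mirrors the existing specs:
    //   serverPod := startVolumeServer(c, newNFSTestConfig(ns))
    //   serverIP  := serverPod.Status.PodIP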
@@ -384,7 +393,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { config := VolumeTestConfig{ namespace: namespace.Name, prefix: "nfs", - serverImage: "gcr.io/google_containers/volume-nfs:0.8", + serverImage: NfsServerImage, serverPorts: []int{2049}, } @@ -424,7 +433,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { config := VolumeTestConfig{ namespace: namespace.Name, prefix: "gluster", - serverImage: "gcr.io/google_containers/volume-gluster:0.2", + serverImage: GlusterfsServerImage, serverPorts: []int{24007, 24008, 49152}, } @@ -509,7 +518,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { config := VolumeTestConfig{ namespace: namespace.Name, prefix: "iscsi", - serverImage: "gcr.io/google_containers/volume-iscsi:0.1", + serverImage: IscsiServerImage, serverPorts: []int{3260}, serverVolumes: map[string]string{ // iSCSI container needs to insert modules from the host @@ -556,7 +565,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { config := VolumeTestConfig{ namespace: namespace.Name, prefix: "rbd", - serverImage: "gcr.io/google_containers/volume-rbd:0.1", + serverImage: RbdServerImage, serverPorts: []int{6789}, serverVolumes: map[string]string{ // iSCSI container needs to insert modules from the host @@ -634,7 +643,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { config := VolumeTestConfig{ namespace: namespace.Name, prefix: "cephfs", - serverImage: "gcr.io/google_containers/volume-ceph:0.1", + serverImage: CephServerImage, serverPorts: []int{6789}, }
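Worth calling out from the podExec hunk in persistent_volumes-disruptive.go above: the old implementation split bashExec on spaces and passed the resulting words straight to kubectl exec, which runs the binary directly with no shell, so quoting and operators such as && were lost; the new form hands the entire string to /bin/sh -c inside the target container. The following standalone sketch only illustrates the difference in the argv handed to kubectl; the pod name and namespace are placeholders, not values from the patch.

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	bashExec := "touch /mnt/_SUCCESS && stat '/mnt/_SUCCESS'"

    	// Old form: naive word-splitting. The quotes and '&&' become literal argv
    	// words, so no shell ever interprets the compound command.
    	oldArgs := append([]string{"exec", "--namespace=default", "mypod"}, strings.Split(bashExec, " ")...)
    	fmt.Println(oldArgs)

    	// New form (what the patch adopts): the whole string reaches a shell in the
    	// container, preserving quoting, pipes, and && chaining.
    	newArgs := []string{"exec", "--namespace=default", "mypod", "--", "/bin/sh", "-c", bashExec}
    	fmt.Println(newArgs)
    }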