diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index acbf713c929..bcc37ad8873 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -3788,19 +3788,39 @@ func RestartKubeProxy(host string) error {
 func RestartKubelet(host string) error {
 	// TODO: Make it work for all providers and distros.
-	if !ProviderIs("gce", "aws") {
-		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
+	supportedProviders := []string{"gce", "aws", "vsphere"}
+	if !ProviderIs(supportedProviders...) {
+		return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
 	}
 	if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
 		return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
 	}
 	var cmd string
+
 	if ProviderIs("gce") && NodeOSDistroIs("debian") {
 		cmd = "sudo /etc/init.d/kubelet restart"
+	} else if ProviderIs("vsphere") {
+		var sudoPresent bool
+		sshResult, err := SSH("sudo --version", host, TestContext.Provider)
+		if err != nil {
+			return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
+		}
+		if !strings.Contains(sshResult.Stderr, "command not found") {
+			sudoPresent = true
+		}
+		sshResult, err = SSH("systemctl --version", host, TestContext.Provider)
+		if !strings.Contains(sshResult.Stderr, "command not found") {
+			cmd = "systemctl restart kubelet"
+		} else {
+			cmd = "service kubelet restart"
+		}
+		if sudoPresent {
+			cmd = fmt.Sprintf("sudo %s", cmd)
+		}
 	} else {
 		cmd = "sudo systemctl restart kubelet"
 	}
-	Logf("Restarting kubelet via ssh, running: %v", cmd)
+	Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd)
 	result, err := SSH(cmd, host, TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		LogSSHResult(result)
diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index c9ca5dbed3c..c5c693931d9 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -31,6 +31,7 @@ go_library(
         "vsphere_volume_diskformat.go",
         "vsphere_volume_disksize.go",
         "vsphere_volume_fstype.go",
+        "vsphere_volume_master_restart.go",
         "vsphere_volume_ops_storm.go",
         "vsphere_volume_placement.go",
         "vsphere_volume_vsan_policy.go",
diff --git a/test/e2e/storage/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere_volume_master_restart.go
new file mode 100644
index 00000000000..36a0164aabe
--- /dev/null
+++ b/test/e2e/storage/vsphere_volume_master_restart.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+/*
+	Test to verify volume remains attached after kubelet restart on master node
+	For the number of schedulable nodes,
+	1. Create a volume with default volume options
+	2. Create a Pod
+	3. Verify the volume is attached
+	4. Restart the kubelet on master node
+	5. Verify again that the volume is attached
+	6. Delete the pod and wait for the volume to be detached
+	7. Delete the volume
+*/
+var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() {
+	f := framework.NewDefaultFramework("restart-master")
+
+	const labelKey = "vsphere_e2e_label"
+	var (
+		client                clientset.Interface
+		namespace             string
+		volumePaths           []string
+		pods                  []*v1.Pod
+		numNodes              int
+		nodeKeyValueLabelList []map[string]string
+		nodeNameList          []string
+	)
+	BeforeEach(func() {
+		framework.SkipUnlessProviderIs("vsphere")
+		client = f.ClientSet
+		namespace = f.Namespace.Name
+		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+
+		nodes := framework.GetReadySchedulableNodesOrDie(client)
+		numNodes = len(nodes.Items)
+		if numNodes < 2 {
+			framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
+		}
+
+		for i := 0; i < numNodes; i++ {
+			nodeName := nodes.Items[i].Name
+			nodeNameList = append(nodeNameList, nodeName)
+			nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
+			nodeKeyValueLabel := make(map[string]string)
+			nodeKeyValueLabel[labelKey] = nodeLabelValue
+			nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel)
+			framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
+		}
+	})
+
+	It("verify volume remains attached after master kubelet restart", func() {
+		vsp, err := vsphere.GetVSphere()
+		Expect(err).NotTo(HaveOccurred())
+
+		// Create pod on each node
+		for i := 0; i < numNodes; i++ {
+			By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
+			volumePath, err := createVSphereVolume(vsp, nil)
+			Expect(err).NotTo(HaveOccurred())
+
+			volumePaths = append(volumePaths, volumePath)
+
+			By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
+			podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
+			pod, err := client.CoreV1().Pods(namespace).Create(podspec)
+			Expect(err).NotTo(HaveOccurred())
+			defer framework.DeletePodWithWait(f, client, pod)
+
+			By("Waiting for pod to be ready")
+			Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
+
+			pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			pods = append(pods, pod)
+
+			nodeName := types.NodeName(pod.Spec.NodeName)
+			By(fmt.Sprintf("Verify volume %s is attached to the pod %v", volumePath, nodeName))
+			isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+
+		}
+
+		By("Restarting kubelet on master node")
+		masterAddress := framework.GetMasterHost() + ":22"
+		err = framework.RestartKubelet(masterAddress)
+		Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
+
+		By("Verifying the kubelet on master node is up")
+		err = framework.WaitForKubeletUp(masterAddress)
+		Expect(err).NotTo(HaveOccurred())
+
+		for i, pod := range pods {
+			volumePath := volumePaths[i]
+
+			nodeName := types.NodeName(pod.Spec.NodeName)
+			By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
+			isAttached, err := verifyVSphereDiskAttached(vsp, volumePaths[i], types.NodeName(nodeName))
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+
+			By(fmt.Sprintf("Deleting pod on node %v", nodeName))
+			err = framework.DeletePodWithWait(f, client, pod)
+			Expect(err).NotTo(HaveOccurred())
+
+			By(fmt.Sprintf("Waiting for volume %s to be detached from the node %v", volumePath, nodeName))
+			err = waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
+			Expect(err).NotTo(HaveOccurred())
+
+			By(fmt.Sprintf("Deleting volume %s", volumePath))
+			err = vsp.DeleteVolume(volumePath)
+			Expect(err).NotTo(HaveOccurred())
+		}
+	})
+})
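
For reference, below is a minimal standalone sketch (not part of the patch) of the command-selection logic added in the vSphere branch of RestartKubelet above: probe the host over SSH for sudo and systemd, then build the restart command. The kubeletRestartCommand name and the runSSH parameter are hypothetical stand-ins; in the patch, framework.SSH runs the probes and the result's Stderr field is inspected. The sketch also checks the error of the second probe, which the patch does not.

package main

import (
	"fmt"
	"strings"
)

// kubeletRestartCommand mirrors the command-selection logic from the vSphere
// branch of RestartKubelet. runSSH is a hypothetical stand-in for framework.SSH;
// it runs cmd on the target host and returns that command's stderr.
func kubeletRestartCommand(runSSH func(cmd string) (stderr string, err error)) (string, error) {
	// Probe for sudo; "command not found" on stderr means it is absent.
	stderr, err := runSSH("sudo --version")
	if err != nil {
		return "", fmt.Errorf("unable to ssh to host: %v", err)
	}
	sudoPresent := !strings.Contains(stderr, "command not found")

	// Prefer systemd when available, otherwise fall back to SysV init.
	stderr, err = runSSH("systemctl --version")
	if err != nil {
		return "", fmt.Errorf("unable to ssh to host: %v", err)
	}
	cmd := "service kubelet restart"
	if !strings.Contains(stderr, "command not found") {
		cmd = "systemctl restart kubelet"
	}

	// Only prepend sudo when it actually exists on the node.
	if sudoPresent {
		cmd = "sudo " + cmd
	}
	return cmd, nil
}

func main() {
	// Fake SSH runner simulating a systemd host with sudo installed:
	// every probe succeeds, so stderr never contains "command not found".
	fakeSSH := func(cmd string) (string, error) { return "", nil }

	cmd, err := kubeletRestartCommand(fakeSSH)
	fmt.Println(cmd, err) // prints: sudo systemctl restart kubelet <nil>
}

Keying off "command not found" on stderr keeps the helper distro-agnostic: systemd hosts get "systemctl restart kubelet", SysV-init hosts fall back to "service kubelet restart".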