Move functions from e2e framework util.go

The following functions are called from only a few specific places,
so this moves them to those places and makes them local.

- WaitForPersistentVolumeClaimDeleted: Moved to e2e storage
- PrintSummaries: Moved to e2e framework.go
- GetHostExternalAddress: Moved to e2e node
- WaitForMasters: Moved to e2e cloud gcp
- WaitForApiserverUp: Moved to e2e network
- WaitForKubeletUp: Moved to e2e storage vsphere
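
The pattern is the same in each case: an exported framework helper is copied
into the one test package that calls it, unexported, and the call site drops
the framework qualifier. A minimal sketch with illustrative names (not the
actual moved code):

package example

import (
	"fmt"
	"time"
)

// ready stands in for whatever condition the real helpers poll
// (apiserver health, kubelet health, PVC deletion, and so on).
var ready = func() bool { return true }

// waitForReady was previously an exported framework helper
// (framework.WaitForReady in this sketch); after the move it is
// unexported and lives next to its only caller.
func waitForReady(timeout, poll time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		if ready() {
			return nil
		}
	}
	return fmt.Errorf("condition not met within %v", timeout)
}

// The call site changes from framework.WaitForReady(...) to waitForReady(...).
func caller() error {
	return waitForReady(time.Minute, 5*time.Second)
}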
Author: Kenichi Omichi
Date:   2019-11-08 17:55:28 +00:00
Parent: ed3cc6afea
Commit: 94211f1839

9 changed files with 202 additions and 188 deletions

View File

@@ -20,14 +20,18 @@ import (
"fmt"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@@ -111,6 +115,48 @@ func removeZoneFromZones(zones []string, zone string) []string {
return zones
}

// generateMasterRegexp returns a regex for matching master node name.
func generateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
// waitForMasters waits until the cluster has the desired number of ready masters in it.
func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
e2enode.Filter(nodes, func(node v1.Node) bool {
res, err := regexp.Match(generateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
framework.Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
e2enode.Filter(nodes, func(node v1.Node) bool {
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
framework.Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
framework.Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
f := framework.NewDefaultFramework("ha-master") f := framework.NewDefaultFramework("ha-master")
var c clientset.Interface var c clientset.Interface
@@ -123,7 +169,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
framework.SkipUnlessProviderIs("gce")
c = f.ClientSet
ns = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
+framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
additionalReplicaZones = make([]string, 0)
existingRCs = make([]string, 0)
})
@@ -139,7 +185,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
for _, zone := range additionalReplicaZones {
removeMasterReplica(zone)
}
-framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
+framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
})

type Action int
@@ -167,7 +213,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
framework.ExpectNoError(removeWorkerNodes(zone))
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
}
-framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
+framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
// Verify that API server works correctly with HA master.

View File

@@ -24,7 +24,9 @@ package framework
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"path"
"strings"
"sync"
"time"
@@ -276,6 +278,43 @@ func (f *Framework) BeforeEach() {
f.flakeReport = NewFlakeReport()
}

// printSummaries prints summaries of tests.
func printSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}

// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
RemoveCleanupAction(f.cleanupHandle)
@@ -368,7 +407,7 @@ func (f *Framework) AfterEach() {
f.flakeReport = nil
}
-PrintSummaries(f.TestSummaries, f.BaseName)
+printSummaries(f.TestSummaries, f.BaseName)
// Check whether all nodes are ready after the test.
// This is explicitly done at the very end of the test, to avoid

View File

@@ -31,7 +31,6 @@ import (
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
@@ -1640,21 +1639,6 @@ func RestartKubelet(host string) error {
return nil
}

// WaitForKubeletUp waits for the kubelet on the given host to be up.
func WaitForKubeletUp(host string) error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := e2essh.SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for kubelet timed out")
}

// RestartApiserver restarts the kube-apiserver.
func RestartApiserver(cs clientset.Interface) error {
// TODO: Make it work for all providers.
@@ -1699,17 +1683,6 @@ func sshRestartMaster() error {
return nil
}

// WaitForApiserverUp waits for the kube-apiserver to be up.
func WaitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}

// waitForApiserverRestarted waits until apiserver's restart count increased.
func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
@@ -1780,101 +1753,6 @@ func WaitForControllerManagerUp() error {
return fmt.Errorf("waiting for controller-manager timed out")
}

// GenerateMasterRegexp returns a regex for matching master node name.
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
e2enode.Filter(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
e2enode.Filter(nodes, func(node v1.Node) bool {
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
// GetHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func GetHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}

type extractRT struct {
http.Header
}
@@ -2236,43 +2114,6 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
return err
}

// PrintSummaries prints summaries of tests.
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
@@ -2326,22 +2167,6 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
return &ds, nil
}

// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}

// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})

View File

@@ -120,6 +120,17 @@ func restartKubeProxy(host string) error {
return nil
}

// waitForApiserverUp waits for the kube-apiserver to be up.
func waitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
var _ = SIGDescribe("Services", func() { var _ = SIGDescribe("Services", func() {
f := framework.NewDefaultFramework("services") f := framework.NewDefaultFramework("services")
@@ -542,7 +553,7 @@ var _ = SIGDescribe("Services", func() {
framework.Failf("error restarting apiserver: %v", err)
}
ginkgo.By("Waiting for apiserver to come up by polling /healthz")
-if err := framework.WaitForApiserverUp(cs); err != nil {
+if err := waitForApiserverUp(cs); err != nil {
framework.Failf("error while waiting for apiserver up: %v", err)
}
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))

View File

@@ -177,6 +177,29 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
return rtnPod
}

// getHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func getHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}

// Checks for a lingering nfs mount and/or uid directory on the pod's host. The host IP is used
// so that this test runs in GCE, where it appears that SSH cannot resolve the hostname.
// If expectClean is true then we expect the node to be cleaned up and thus commands like
@@ -189,7 +212,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
podDir := filepath.Join("/var/lib/kubelet/pods", string(pod.UID))
mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
// use ip rather than hostname in GCE
-nodeIP, err := framework.GetHostExternalAddress(c, pod)
+nodeIP, err := getHostExternalAddress(c, pod)
framework.ExpectNoError(err)
condMsg := "deleted"

View File

@@ -20,7 +20,11 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"fmt"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
@@ -32,6 +36,22 @@
"k8s.io/kubernetes/test/e2e/storage/utils"
)

// waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}

var _ = utils.SIGDescribe("PVC Protection", func() {
var (
client clientset.Interface
@@ -92,7 +112,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.By("Deleting the PVC")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
-framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
+waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
@@ -111,7 +131,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
-framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
+waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
@@ -143,7 +163,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
-framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
+waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
})

View File

@@ -112,6 +112,36 @@ func isSudoPresent(nodeIP string, provider string) bool {
return false
}

// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired statues..
// - First issues the command via `systemctl`
@@ -123,7 +153,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
systemctlPresent := false
kubeletPid := ""
-nodeIP, err := framework.GetHostAddress(c, pod)
+nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@@ -239,7 +269,7 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
-nodeIP, err := framework.GetHostAddress(c, clientPod)
+nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@@ -315,7 +345,7 @@ func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.F
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
-nodeIP, err := framework.GetHostAddress(c, clientPod)
+nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"

View File

@@ -40,6 +40,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/controller/volume/events:go_default_library",
"//pkg/master/ports:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@@ -18,6 +18,8 @@ package vsphere
import (
"fmt"
"strconv"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -26,12 +28,29 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

// waitForKubeletUp waits for the kubelet on the given host to be up.
func waitForKubeletUp(host string) error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for kubelet timed out")
}

/*
Test to verify volume remains attached after kubelet restart on master node
For the number of schedulable nodes,
@@ -117,7 +136,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
framework.ExpectNoError(err, "Unable to restart kubelet on master node")
ginkgo.By("Verifying the kubelet on master node is up")
-err = framework.WaitForKubeletUp(masterAddress)
+err = waitForKubeletUp(masterAddress)
framework.ExpectNoError(err)
for i, pod := range pods {