Moved e2e boilerplate to framework

Vinicyus Macedo 2019-07-08 21:19:18 -03:00
parent 4824f823ad
commit 0f21b692e4
7 changed files with 255 additions and 220 deletions

View File

@ -51,7 +51,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -67,7 +66,6 @@ go_library(
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/aws:go_default_library",
"//test/e2e/framework/providers/azure:go_default_library",
@ -76,14 +74,11 @@ go_library(
"//test/e2e/framework/providers/openstack:go_default_library",
"//test/e2e/framework/providers/vsphere:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
],
)

View File

@ -18,11 +18,9 @@ package e2e
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
@ -30,20 +28,11 @@ import (
"github.com/onsi/gomega"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/logs"
"k8s.io/kubernetes/pkg/version"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/manifest"
testutils "k8s.io/kubernetes/test/utils"
utilnet "k8s.io/utils/net"
// ensure auth plugins are loaded
_ "k8s.io/client-go/plugin/pkg/client/auth"
@ -57,173 +46,21 @@ import (
_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
)
var (
cloudConfig = &framework.TestContext.CloudConfig
nodeKillerStopCh = make(chan struct{})
)
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Run only on Ginkgo node 1
switch framework.TestContext.Provider {
case "gce", "gke":
framework.LogClusterImageSources()
}
c, err := framework.LoadClientset()
if err != nil {
klog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except those created by the system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
[]string{
metav1.NamespaceSystem,
metav1.NamespaceDefault,
metav1.NamespacePublic,
})
if err != nil {
e2elog.Failf("Error deleting orphaned namespaces: %v", err)
}
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
// If NumNodes is not specified then auto-detect how many are schedulable and not tainted
if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
framework.TestContext.CloudConfig.NumNodes = len(framework.GetReadySchedulableNodesOrDie(c).Items)
}
// Ensure all pods are running and ready before starting tests (otherwise,
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
// TODO: In large clusters, we often observe non-starting pods due to
// #41007. To avoid those pods blocking the whole test run (and just
// wasting it), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
e2elog.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.
e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
dc := c.DiscoveryClient
serverVersion, serverErr := dc.ServerVersion()
if serverErr != nil {
e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
}
if serverVersion != nil {
e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
}
// Obtain the default IP family of the cluster.
// Some e2e tests are designed to work on IPv4 only; this global variable
// allows those tests to be adapted to work on both IPv4 and IPv6.
// TODO(dual-stack): dual-stack clusters should pass full e2e testing, at least with the primary IP family;
// dual-stack clusters can be ipv4-ipv6 or ipv6-ipv4 (order matters,
// and services use the primary IP family by default).
// If we need to provide additional context for dual-stack later, we can detect it
// because pods have two addresses (one per family).
framework.TestContext.IPFamily = getDefaultClusterIPFamily(c)
e2elog.Logf("Cluster IP family: %s", framework.TestContext.IPFamily)
// Reference common test to make the import valid.
commontest.CurrentSuite = commontest.E2E
if framework.TestContext.NodeKiller.Enabled {
nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
nodeKillerStopCh = make(chan struct{})
go nodeKiller.Run(nodeKillerStopCh)
}
framework.SetupSuite()
return nil
}, func(data []byte) {
// Run on all Ginkgo nodes
})
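For readers unfamiliar with the Ginkgo v1 primitive used above, here is a minimal, illustrative sketch of the SynchronizedBeforeSuite contract; the payload and its use are hypothetical and not part of this change:

package example

import "github.com/onsi/ginkgo"

// The first function runs once, on parallel node 1 only; whatever bytes it
// returns are serialized and handed to the second function on every node,
// so any shared state must round-trip through that byte slice.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
    return []byte("shared-config") // e.g. a kubeconfig path or a JSON blob
}, func(data []byte) {
    // Runs on every parallel node (including node 1) with the bytes above.
    _ = data
})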
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
// Run on all Ginkgo nodes
e2elog.Logf("Running AfterSuite actions on all nodes")
framework.RunCleanupActions()
framework.CleanupSuite()
}, func() {
// Run only on Ginkgo node 1
e2elog.Logf("Running AfterSuite actions on node 1")
if framework.TestContext.ReportDir != "" {
framework.CoreDump(framework.TestContext.ReportDir)
}
if framework.TestContext.GatherSuiteMetricsAfterTest {
if err := gatherTestSuiteMetrics(); err != nil {
e2elog.Logf("Error gathering metrics: %v", err)
}
}
if framework.TestContext.NodeKiller.Enabled {
close(nodeKillerStopCh)
}
framework.AfterSuiteActions()
})
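Note the mirrored signature: in SynchronizedAfterSuite the all-nodes function comes first. Continuing the sketch above in the same example package, again illustrative only:

var _ = ginkgo.SynchronizedAfterSuite(func() {
    // First argument: runs on every parallel node (per-node teardown).
}, func() {
    // Second argument: runs only on node 1, once all nodes have finished
    // (global teardown such as collecting cluster logs).
})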
func gatherTestSuiteMetrics() error {
e2elog.Logf("Gathering metrics")
c, err := framework.LoadClientset()
if err != nil {
return fmt.Errorf("error loading client: %v", err)
}
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
}
received, err := grabber.Grab()
if err != nil {
return fmt.Errorf("failed to grab metrics: %v", err)
}
metricsForE2E := (*e2emetrics.MetricsForE2E)(&received)
metricsJSON := metricsForE2E.PrintJSON()
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err)
}
} else {
e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
}
return nil
}
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
@ -255,52 +92,3 @@ func RunE2ETests(t *testing.T) {
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
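For context on the JUnit reports mentioned in the doc comment, here is a hedged sketch of how a per-node JUnit reporter can be passed to RunSpecsWithDefaultAndCustomReporters in Ginkgo v1; reportDir, the file-name pattern, and the suite name are illustrative:

package example

import (
    "fmt"
    "path"
    "testing"

    "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/reporters"
)

func runWithJUnit(t *testing.T, reportDir string) {
    // One JUnit file per parallel Ginkgo node so the reports do not collide.
    junitPath := path.Join(reportDir,
        fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))
    r := []ginkgo.Reporter{reporters.NewJUnitReporter(junitPath)}
    ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "example suite", r)
}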
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
path := "test/images/clusterapi-tester/pod.yaml"
e2elog.Logf("Parsing pod from %v", path)
p, err := manifest.PodFromManifest(path)
if err != nil {
e2elog.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
return
}
p.Namespace = ns
if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
e2elog.Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
e2elog.Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
timeout := 5 * time.Minute
if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
return
}
logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
if err != nil {
e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
} else {
e2elog.Logf("Output of clusterapi-tester:\n%v", logs)
}
}
// getDefaultClusterIPFamily obtains the default IP family of the cluster
// using the Cluster IP address of the kubernetes service created in the default namespace
// This unequivocally identifies the default IP family because services are single-family
func getDefaultClusterIPFamily(c clientset.Interface) string {
// Get the ClusterIP of the kubernetes service created in the default namespace
svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get kubernetes service ClusterIP: %v", err)
}
if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
return "ipv6"
}
return "ipv4"
}

View File

@ -23,6 +23,7 @@ go_library(
"rc_util.go",
"resource_usage_gatherer.go",
"size.go",
"suites.go",
"test_context.go",
"util.go",
],
@ -46,6 +47,7 @@ go_library(
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@ -98,6 +100,7 @@ go_library(
"//test/e2e/framework/resource:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@ -324,6 +324,7 @@ type NodeKiller struct {
// NewNodeKiller creates a new NodeKiller.
func NewNodeKiller(config NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
return &NodeKiller{config, client, provider}
}

View File

@ -0,0 +1,195 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"io/ioutil"
"path"
"time"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/version"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
var (
cloudConfig = &TestContext.CloudConfig
)
// SetupSuite is the boilerplate that can be used to set up ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// Unlike SynchronizedBeforeSuite itself, SetupSuite takes no parameters; it is meant
// to be called from the function that runs only on the first Ginkgo node.
func SetupSuite() {
// Run only on Ginkgo node 1
switch TestContext.Provider {
case "gce", "gke":
LogClusterImageSources()
}
c, err := LoadClientset()
if err != nil {
klog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except those created by the system. This ensures no
// lingering resources are left over from a previous test run.
if TestContext.CleanStart {
deleted, err := DeleteNamespaces(c, nil, /* deleteFilter */
[]string{
metav1.NamespaceSystem,
metav1.NamespaceDefault,
metav1.NamespacePublic,
})
if err != nil {
e2elog.Failf("Error deleting orphaned namespaces: %v", err)
}
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout); err != nil {
e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
ExpectNoError(WaitForAllNodesSchedulable(c, TestContext.NodeSchedulableTimeout))
// If NumNodes is not specified then auto-detect how many are schedulable and not tainted
if TestContext.CloudConfig.NumNodes == DefaultNumNodes {
TestContext.CloudConfig.NumNodes = len(GetReadySchedulableNodesOrDie(c).Items)
}
// Ensure all pods are running and ready before starting tests (otherwise,
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := TestContext.SystemPodsStartupTimeout
// TODO: In large clusters, we often observe non-starting pods due to
// #41007. To avoid those pods blocking the whole test run (and just
// wasting it), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(TestContext.MinStartupPods), int32(TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
e2elog.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := WaitForDaemonSets(c, metav1.NamespaceSystem, int32(TestContext.AllowedNotReadyNodes), TestContext.SystemDaemonsetStartupTimeout); err != nil {
e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.
e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
dc := c.DiscoveryClient
serverVersion, serverErr := dc.ServerVersion()
if serverErr != nil {
e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
}
if serverVersion != nil {
e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
}
// Obtain the default IP family of the cluster.
// Some e2e tests are designed to work on IPv4 only; this global variable
// allows those tests to be adapted to work on both IPv4 and IPv6.
// TODO(dual-stack): dual-stack clusters should pass full e2e testing, at least with the primary IP family;
// dual-stack clusters can be ipv4-ipv6 or ipv6-ipv4 (order matters,
// and services use the primary IP family by default).
// If we need to provide additional context for dual-stack later, we can detect it
// because pods have two addresses (one per family).
TestContext.IPFamily = getDefaultClusterIPFamily(c)
e2elog.Logf("Cluster IP family: %s", TestContext.IPFamily)
if TestContext.NodeKiller.Enabled {
nodeKiller := NewNodeKiller(TestContext.NodeKiller, c, TestContext.Provider)
go nodeKiller.Run(TestContext.NodeKiller.NodeKillerStopCh)
}
}
// CleanupSuite is the boilerplate that can be used after ginkgo tests have run, on the SynchronizedAfterSuite step.
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
func CleanupSuite() {
// Run on all Ginkgo nodes
e2elog.Logf("Running AfterSuite actions on all nodes")
RunCleanupActions()
}
// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
func AfterSuiteActions() {
// Run only on Ginkgo node 1
e2elog.Logf("Running AfterSuite actions on node 1")
if TestContext.ReportDir != "" {
CoreDump(TestContext.ReportDir)
}
if TestContext.GatherSuiteMetricsAfterTest {
if err := gatherTestSuiteMetrics(); err != nil {
e2elog.Logf("Error gathering metrics: %v", err)
}
}
if TestContext.NodeKiller.Enabled {
close(TestContext.NodeKiller.NodeKillerStopCh)
}
}
func gatherTestSuiteMetrics() error {
e2elog.Logf("Gathering metrics")
c, err := LoadClientset()
if err != nil {
return fmt.Errorf("error loading client: %v", err)
}
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !ProviderIs("kubemark"), true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
}
received, err := grabber.Grab()
if err != nil {
return fmt.Errorf("failed to grab metrics: %v", err)
}
metricsForE2E := (*e2emetrics.MetricsForE2E)(&received)
metricsJSON := metricsForE2E.PrintJSON()
if TestContext.ReportDir != "" {
filePath := path.Join(TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err)
}
} else {
e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
}
return nil
}

View File

@ -184,6 +184,8 @@ type NodeKillerConfig struct {
JitterFactor float64
// SimulatedDowntime is the duration between a node being killed and being recreated.
SimulatedDowntime time.Duration
// NodeKillerStopCh is a channel that is used to notify NodeKiller to stop killing nodes.
NodeKillerStopCh chan struct{}
}
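The stop channel follows the conventional Go cancellation idiom: the worker selects on the channel, and a single close() fans out to every listener. A minimal sketch of the pattern; the loop body is illustrative, not NodeKiller's actual logic:

package example

import "time"

// run does periodic work until stopCh is closed; close(stopCh) unblocks
// the <-stopCh receive in every goroutine running this loop.
func run(stopCh <-chan struct{}) {
    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()
    for {
        select {
        case <-stopCh:
            return
        case <-ticker.C:
            // periodic work, e.g. simulate a node outage
        }
    }
}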
// NodeTestContextType is part of TestContextType, it is shared by all node e2e test.

View File

@ -87,9 +87,11 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/manifest"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
)
const (
@ -380,6 +382,55 @@ func RunIfSystemSpecNameIs(names ...string) {
skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
path := "test/images/clusterapi-tester/pod.yaml"
e2elog.Logf("Parsing pod from %v", path)
p, err := manifest.PodFromManifest(path)
if err != nil {
e2elog.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
return
}
p.Namespace = ns
if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
e2elog.Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
e2elog.Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
timeout := 5 * time.Minute
if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
return
}
logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
if err != nil {
e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
} else {
e2elog.Logf("Output of clusterapi-tester:\n%v", logs)
}
}
// getDefaultClusterIPFamily obtains the default IP family of the cluster
// using the Cluster IP address of the kubernetes service created in the default namespace
// This unequivocally identifies the default IP family because services are single-family
func getDefaultClusterIPFamily(c clientset.Interface) string {
// Get the ClusterIP of the kubernetes service created in the default namespace
svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get kubernetes service ClusterIP: %v", err)
}
if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
return "ipv6"
}
return "ipv4"
}
// ProviderIs returns true if the provider is included in the given providers; otherwise false.
func ProviderIs(providers ...string) bool {
for _, provider := range providers {