Mirror of https://github.com/k3s-io/kubernetes.git
Move funcs of suites.go to e2e.go

parent 1488460cd2
commit da89d212dd
@@ -61,6 +61,7 @@ go_library(
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
        "//staging/src/k8s.io/component-base/logs:go_default_library",
        "//staging/src/k8s.io/component-base/version:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/auth:go_default_library",

@@ -74,11 +75,14 @@ go_library(
        "//test/e2e/framework/providers/openstack:go_default_library",
        "//test/e2e/framework/providers/vsphere:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/ginkgo/config:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/net:go_default_library",
    ],
)

test/e2e/e2e.go (180 lines changed)
@@ -21,19 +21,30 @@ import (
    "os"
    "path"
    "testing"
    "time"

    "k8s.io/klog"

    "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/reporters"
    "github.com/onsi/gomega"
    "k8s.io/klog"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/component-base/logs"
    "k8s.io/component-base/version"
    commontest "k8s.io/kubernetes/test/e2e/common"
    "k8s.io/kubernetes/test/e2e/framework"
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    "k8s.io/kubernetes/test/e2e/manifest"
    testutils "k8s.io/kubernetes/test/utils"
    utilnet "k8s.io/utils/net"

    clientset "k8s.io/client-go/kubernetes"
    // ensure auth plugins are loaded
    _ "k8s.io/client-go/plugin/pkg/client/auth"

@@ -49,11 +60,11 @@ import (
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
    // Reference common test to make the import valid.
    commontest.CurrentSuite = commontest.E2E
    framework.SetupSuite()
    setupSuite()
    return nil
}, func(data []byte) {
    // Run on all Ginkgo nodes
    framework.SetupSuitePerGinkgoNode()
    setupSuitePerGinkgoNode()
})
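For context on the ginkgo.SynchronizedBeforeSuite contract used above (the first function runs only on parallel node 1 and returns a byte slice, the second runs on every node and receives it), here is a minimal standalone sketch. It is illustrative only and not part of this commit; the e2esketch package and sharedConfig type are made up.

package e2esketch

import (
    "encoding/json"

    "github.com/onsi/ginkgo"
)

// sharedConfig is a hypothetical payload passed from node 1 to all nodes.
type sharedConfig struct {
    ClusterID string `json:"clusterID"`
}

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
    // Runs only on parallel Ginkgo node 1; the returned bytes are handed
    // to the second function on every node.
    data, err := json.Marshal(sharedConfig{ClusterID: "example"})
    if err != nil {
        ginkgo.Fail(err.Error())
    }
    return data
}, func(data []byte) {
    // Runs on all parallel nodes (node 1 included) with the bytes from above.
    var cfg sharedConfig
    if err := json.Unmarshal(data, &cfg); err != nil {
        ginkgo.Fail(err.Error())
    }
    _ = cfg // a real suite would stash this in a shared variable
})

The suite in this commit simply returns nil and ignores the bytes, because setupSuitePerGinkgoNode re-detects the needed state on every node rather than passing it from node 1.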
var _ = ginkgo.SynchronizedAfterSuite(func() {

@@ -93,3 +104,166 @@ func RunE2ETests(t *testing.T) {

    ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}

// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
    path := "test/images/clusterapi-tester/pod.yaml"
    framework.Logf("Parsing pod from %v", path)
    p, err := manifest.PodFromManifest(path)
    if err != nil {
        framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
        return
    }
    p.Namespace = ns
    if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
        framework.Logf("Failed to create %v: %v", p.Name, err)
        return
    }
    defer func() {
        if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
            framework.Logf("Failed to delete pod %v: %v", p.Name, err)
        }
    }()
    timeout := 5 * time.Minute
    if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
        framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
        return
    }
    logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
    if err != nil {
        framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
    } else {
        framework.Logf("Output of clusterapi-tester:\n%v", logs)
    }
}

// getDefaultClusterIPFamily obtains the default IP family of the cluster
// using the Cluster IP address of the kubernetes service created in the default namespace
// This unequivocally identifies the default IP family because services are single family
// TODO: dual-stack may support multiple families per service
// but we can detect if a cluster is dual stack because pods have two addresses (one per family)
func getDefaultClusterIPFamily(c clientset.Interface) string {
    // Get the ClusterIP of the kubernetes service created in the default namespace
    svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed to get kubernetes service ClusterIP: %v", err)
    }

    if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
        return "ipv6"
    }
    return "ipv4"
}
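As a quick illustration of the classification getDefaultClusterIPFamily performs, the following standalone sketch (not part of the diff; the sample addresses are arbitrary) feeds IPv4 and IPv6 strings through k8s.io/utils/net:

package main

import (
    "fmt"

    utilnet "k8s.io/utils/net"
)

func main() {
    // Mirrors the check above: an IPv6 ClusterIP marks the cluster's
    // default service family as "ipv6", anything else as "ipv4".
    for _, ip := range []string{"10.96.0.1", "fd00:10:96::1"} {
        family := "ipv4"
        if utilnet.IsIPv6String(ip) {
            family = "ipv6"
        }
        fmt.Printf("%s -> %s\n", ip, family)
    }
}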
// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running.
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
func setupSuite() {
    // Run only on Ginkgo node 1

    switch framework.TestContext.Provider {
    case "gce", "gke":
        framework.LogClusterImageSources()
    }

    c, err := framework.LoadClientset()
    if err != nil {
        klog.Fatal("Error loading client: ", err)
    }

    // Delete any namespaces except those created by the system. This ensures no
    // lingering resources are left over from a previous test run.
    if framework.TestContext.CleanStart {
        deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
            []string{
                metav1.NamespaceSystem,
                metav1.NamespaceDefault,
                metav1.NamespacePublic,
                v1.NamespaceNodeLease,
            })
        if err != nil {
            framework.Failf("Error deleting orphaned namespaces: %v", err)
        }
        klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
        if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
            framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
        }
    }

    // In large clusters we may get to this point but still have a bunch
    // of nodes without Routes created. Since this would make a node
    // unschedulable, we need to wait until all of them are schedulable.
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))

    // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
    if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
        nodes, err := e2enode.GetReadySchedulableNodes(c)
        framework.ExpectNoError(err)
        framework.TestContext.CloudConfig.NumNodes = len(nodes.Items)
    }

    // Ensure all pods are running and ready before starting tests (otherwise,
    // cluster infrastructure pods that are being pulled or started can block
    // test pods from running, and tests that ensure all pods are running and
    // ready will fail).
    podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
    // TODO: In large clusters, we often observe a non-starting pods due to
    // #41007. To avoid those pods preventing the whole test runs (and just
    // wasting the whole run), we allow for some not-ready pods (with the
    // number equal to the number of allowed not-ready nodes).
    if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
        framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
        framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
        runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
        framework.Failf("Error waiting for all pods to be running and ready: %v", err)
    }

    if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
        framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
    }

    // Log the version of the server and this client.
    framework.Logf("e2e test version: %s", version.Get().GitVersion)

    dc := c.DiscoveryClient

    serverVersion, serverErr := dc.ServerVersion()
    if serverErr != nil {
        framework.Logf("Unexpected server error retrieving version: %v", serverErr)
    }
    if serverVersion != nil {
        framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
    }

    if framework.TestContext.NodeKiller.Enabled {
        nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
        go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
    }
}

// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation on each Ginkgo node
// such as making some global variables accessible to all parallel executions
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// Ref: https://onsi.github.io/ginkgo/#parallel-specs
func setupSuitePerGinkgoNode() {
    // Obtain the default IP family of the cluster
    // Some e2e test are designed to work on IPv4 only, this global variable
    // allows to adapt those tests to work on both IPv4 and IPv6
    // TODO: dual-stack
    // the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters,
    // and services use the primary IP family by default
    c, err := framework.LoadClientset()
    if err != nil {
        klog.Fatal("Error loading client: ", err)
    }
    framework.TestContext.IPFamily = getDefaultClusterIPFamily(c)
    framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily)
}
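Because setupSuitePerGinkgoNode runs on every parallel node, framework.TestContext.IPFamily is available to all specs. The snippet below is a hypothetical usage sketch, not code from this commit; it assumes framework.Skipf is available from the framework package, as it is elsewhere in this era of the codebase.

package e2esketch

import (
    "github.com/onsi/ginkgo"

    "k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ip-family-aware example", func() {
    ginkgo.It("adapts an IPv4-only check to the cluster's default family", func() {
        // Hypothetical guard: skip rather than fail on clusters whose
        // default service family is IPv6.
        if framework.TestContext.IPFamily == "ipv6" {
            framework.Skipf("this check exercises IPv4-only behavior")
        }
        // IPv4-specific assertions would go here.
    })
})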
@@ -74,7 +74,6 @@ go_library(
        "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
        "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
        "//staging/src/k8s.io/component-base/version:go_default_library",
        "//test/e2e/framework/auth:go_default_library",
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",

@@ -82,7 +81,6 @@ go_library(
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/resource:go_default_library",
        "//test/e2e/framework/ssh:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/e2e/system:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",

@@ -94,7 +92,6 @@ go_library(
        "//vendor/golang.org/x/net/websocket:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
        "//vendor/k8s.io/utils/net:go_default_library",
    ],
)
@@ -22,128 +22,9 @@ import (
    "path"
    "time"

    "k8s.io/klog"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/component-base/version"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// SetupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running.
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
func SetupSuite() {
    // Run only on Ginkgo node 1

    switch TestContext.Provider {
    case "gce", "gke":
        LogClusterImageSources()
    }

    c, err := LoadClientset()
    if err != nil {
        klog.Fatal("Error loading client: ", err)
    }

    // Delete any namespaces except those created by the system. This ensures no
    // lingering resources are left over from a previous test run.
    if TestContext.CleanStart {
        deleted, err := DeleteNamespaces(c, nil, /* deleteFilter */
            []string{
                metav1.NamespaceSystem,
                metav1.NamespaceDefault,
                metav1.NamespacePublic,
                v1.NamespaceNodeLease,
            })
        if err != nil {
            Failf("Error deleting orphaned namespaces: %v", err)
        }
        klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
        if err := WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout); err != nil {
            Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
        }
    }

    // In large clusters we may get to this point but still have a bunch
    // of nodes without Routes created. Since this would make a node
    // unschedulable, we need to wait until all of them are schedulable.
    ExpectNoError(WaitForAllNodesSchedulable(c, TestContext.NodeSchedulableTimeout))

    // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
    if TestContext.CloudConfig.NumNodes == DefaultNumNodes {
        nodes, err := e2enode.GetReadySchedulableNodes(c)
        ExpectNoError(err)
        TestContext.CloudConfig.NumNodes = len(nodes.Items)
    }

    // Ensure all pods are running and ready before starting tests (otherwise,
    // cluster infrastructure pods that are being pulled or started can block
    // test pods from running, and tests that ensure all pods are running and
    // ready will fail).
    podStartupTimeout := TestContext.SystemPodsStartupTimeout
    // TODO: In large clusters, we often observe a non-starting pods due to
    // #41007. To avoid those pods preventing the whole test runs (and just
    // wasting the whole run), we allow for some not-ready pods (with the
    // number equal to the number of allowed not-ready nodes).
    if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(TestContext.MinStartupPods), int32(TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
        DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
        LogFailedContainers(c, metav1.NamespaceSystem, Logf)
        runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
        Failf("Error waiting for all pods to be running and ready: %v", err)
    }

    if err := WaitForDaemonSets(c, metav1.NamespaceSystem, int32(TestContext.AllowedNotReadyNodes), TestContext.SystemDaemonsetStartupTimeout); err != nil {
        Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
    }

    // Log the version of the server and this client.
    Logf("e2e test version: %s", version.Get().GitVersion)

    dc := c.DiscoveryClient

    serverVersion, serverErr := dc.ServerVersion()
    if serverErr != nil {
        Logf("Unexpected server error retrieving version: %v", serverErr)
    }
    if serverVersion != nil {
        Logf("kube-apiserver version: %s", serverVersion.GitVersion)
    }

    if TestContext.NodeKiller.Enabled {
        nodeKiller := NewNodeKiller(TestContext.NodeKiller, c, TestContext.Provider)
        go nodeKiller.Run(TestContext.NodeKiller.NodeKillerStopCh)
    }
}

// SetupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation on each Ginkgo node
// such as making some global variables accessible to all parallel executions
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// Ref: https://onsi.github.io/ginkgo/#parallel-specs
func SetupSuitePerGinkgoNode() {
    // Obtain the default IP family of the cluster
    // Some e2e test are designed to work on IPv4 only, this global variable
    // allows to adapt those tests to work on both IPv4 and IPv6
    // TODO: dual-stack
    // the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters,
    // and services use the primary IP family by default
    c, err := LoadClientset()
    if err != nil {
        klog.Fatal("Error loading client: ", err)
    }
    TestContext.IPFamily = getDefaultClusterIPFamily(c)
    Logf("Cluster IP family: %s", TestContext.IPFamily)
}

// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
@@ -77,11 +77,9 @@ import (
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
    e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
    "k8s.io/kubernetes/test/e2e/manifest"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"
    uexec "k8s.io/utils/exec"
    utilnet "k8s.io/utils/net"
)

const (

@@ -228,57 +226,6 @@ func RunIfSystemSpecNameIs(names ...string) {
    skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}

// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
    path := "test/images/clusterapi-tester/pod.yaml"
    Logf("Parsing pod from %v", path)
    p, err := manifest.PodFromManifest(path)
    if err != nil {
        Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
        return
    }
    p.Namespace = ns
    if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
        Logf("Failed to create %v: %v", p.Name, err)
        return
    }
    defer func() {
        if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
            Logf("Failed to delete pod %v: %v", p.Name, err)
        }
    }()
    timeout := 5 * time.Minute
    if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
        Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
        return
    }
    logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
    if err != nil {
        Logf("Failed to retrieve logs from %v: %v", p.Name, err)
    } else {
        Logf("Output of clusterapi-tester:\n%v", logs)
    }
}

// getDefaultClusterIPFamily obtains the default IP family of the cluster
// using the Cluster IP address of the kubernetes service created in the default namespace
// This unequivocally identifies the default IP family because services are single family
// TODO: dual-stack may support multiple families per service
// but we can detect if a cluster is dual stack because pods have two addresses (one per family)
func getDefaultClusterIPFamily(c clientset.Interface) string {
    // Get the ClusterIP of the kubernetes service created in the default namespace
    svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
    if err != nil {
        Failf("Failed to get kubernetes service ClusterIP: %v", err)
    }

    if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
        return "ipv6"
    }
    return "ipv4"
}

// ProviderIs returns true if the provider is included is the providers. Otherwise false.
func ProviderIs(providers ...string) bool {
    for _, provider := range providers {