Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 09:52:49 +00:00)
Avoid creating multiple unnecessary clients in tests.
commit b67a6e6d41
parent ae1fb82cfc
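The change applies one pattern throughout the diff below: helpers that previously constructed their own client with framework.LoadClient() now accept an existing *client.Client as their first parameter, so the suite loads a single client up front and threads it through every call. A minimal before/after sketch of the shape (condensed from the helpers in this diff; the bodies are placeholders, not the real implementations):

	// Before: each helper loads a fresh client and must handle the
	// load error itself, on every invocation.
	func WaitForPodsSuccess(ns string, successPodLabels map[string]string, timeout time.Duration) error {
		c, err := LoadClient() // one more client per call
		if err != nil {
			return err
		}
		// ... wait on pods in ns via c ...
		return nil
	}

	// After: the caller passes in the shared client; the helper body
	// shrinks to its actual job.
	func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string]string, timeout time.Duration) error {
		// ... wait on pods in ns via c ...
		return nil
	}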
@@ -107,14 +107,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 		framework.Failf("Failed to setup provider config: %v", err)
 	}
 
+	c, err := framework.LoadClient()
+	if err != nil {
+		glog.Fatal("Error loading client: ", err)
+	}
+
 	// Delete any namespaces except default and kube-system. This ensures no
 	// lingering resources are left over from a previous test run.
 	if framework.TestContext.CleanStart {
-		c, err := framework.LoadClient()
-		if err != nil {
-			glog.Fatal("Error loading client: ", err)
-		}
-
 		deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault})
 		if err != nil {
 			framework.Failf("Error deleting orphaned namespaces: %v", err)
@@ -129,18 +129,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 	// cluster infrastructure pods that are being pulled or started can block
 	// test pods from running, and tests that ensure all pods are running and
 	// ready will fail).
-	if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
-		if c, errClient := framework.LoadClient(); errClient != nil {
-			framework.Logf("Unable to dump cluster information because: %v", errClient)
-		} else {
-			framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
-		}
-		framework.LogFailedContainers(api.NamespaceSystem)
-		framework.RunKubernetesServiceTestContainer(framework.TestContext.RepoRoot, api.NamespaceDefault)
+	if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
+		framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
+		framework.LogFailedContainers(c, api.NamespaceSystem)
+		framework.RunKubernetesServiceTestContainer(c, framework.TestContext.RepoRoot, api.NamespaceDefault)
 		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 	}
 
-	if err := framework.WaitForPodsSuccess(api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
+	if err := framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
 		// There is no guarantee that the image pulling will succeed in 3 minutes
 		// and we don't even run the image puller on all platforms (including GKE).
 		// We wait for it so we get an indication of failures in the logs, and to
@@ -29,7 +29,7 @@ import (
 var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func() {
 	f := framework.NewDefaultFederatedFramework("federated-cluster")
 	It("should allow creation of cluster api objects", func() {
-		framework.SkipUnlessFederated()
+		framework.SkipUnlessFederated(f.Client)
 
 		contexts := f.GetUnderlyingFederatedContexts()
 
@@ -358,18 +358,13 @@ func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInter
 }
 
 // Detects whether the federation namespace exists in the underlying cluster
-func SkipUnlessFederated() {
-	c, err := LoadClient()
-	if err != nil {
-		Failf("Unable to load client: %v", err)
-	}
-
+func SkipUnlessFederated(c *client.Client) {
 	federationNS := os.Getenv("FEDERATION_NAMESPACE")
 	if federationNS == "" {
 		federationNS = "federation-e2e"
 	}
 
-	_, err = c.Namespaces().Get(federationNS)
+	_, err := c.Namespaces().Get(federationNS)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
 			Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
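A mechanical consequence is visible in the hunk above: deleting the c, err := LoadClient() declaration removes the only earlier declaration of err in the function, so the later namespace lookup must switch from plain assignment to a short variable declaration. In isolation (illustrative, surrounding function elided):

	// With LoadClient gone, err is not yet in scope here, so = becomes :=.
	_, err := c.Namespaces().Get(federationNS)
	if err != nil {
		// ... handle NotFound by skipping the federated test ...
	}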
@@ -482,11 +477,7 @@ func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api
 // WaitForPodsSuccess waits till all labels matching the given selector enter
 // the Success state. The caller is expected to only invoke this method once the
 // pods have been created.
-func WaitForPodsSuccess(ns string, successPodLabels map[string]string, timeout time.Duration) error {
-	c, err := LoadClient()
-	if err != nil {
-		return err
-	}
+func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string]string, timeout time.Duration) error {
 	successPodSelector := labels.SelectorFromSet(successPodLabels)
 	start, badPods := time.Now(), []api.Pod{}
 
@@ -533,11 +524,7 @@ func WaitForPodsSuccess(ns string, successPodLabels map[string]string, timeout t
 // returned even if there are minPods pods, some of which are in Running/Ready
 // and some in Success. This is to allow the client to decide if "Success"
 // means "Ready" or not.
-func WaitForPodsRunningReady(ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
-	c, err := LoadClient()
-	if err != nil {
-		return err
-	}
+func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
 	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
 	start := time.Now()
 	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
@@ -618,12 +605,7 @@ func podFromManifest(filename string) (*api.Pod, error) {
 
 // Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
 // to flip to Ready, log its output and delete it.
-func RunKubernetesServiceTestContainer(repoRoot string, ns string) {
-	c, err := LoadClient()
-	if err != nil {
-		Logf("Failed to load client")
-		return
-	}
+func RunKubernetesServiceTestContainer(c *client.Client, repoRoot string, ns string) {
 	path := filepath.Join(repoRoot, "test", "images", "clusterapi-tester", "pod.yaml")
 	p, err := podFromManifest(path)
 	if err != nil {
@@ -667,12 +649,7 @@ func kubectlLogPod(c *client.Client, pod api.Pod) {
 	}
 }
 
-func LogFailedContainers(ns string) {
-	c, err := LoadClient()
-	if err != nil {
-		Logf("Failed to load client")
-		return
-	}
+func LogFailedContainers(c *client.Client, ns string) {
 	podList, err := c.Pods(ns).List(api.ListOptions{})
 	if err != nil {
 		Logf("Error getting pods in namespace '%s': %v", ns, err)
@@ -2366,7 +2343,7 @@ func (config *RCConfig) start() error {
 		if startupStatus.FailedContainers > maxContainerFailures {
 			DumpNodeDebugInfo(config.Client, startupStatus.ContainerRestartNodes.List())
 			// Get the logs from the failed containers to help diagnose what caused them to fail
-			LogFailedContainers(config.Namespace)
+			LogFailedContainers(config.Client, config.Namespace)
 			return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
 		}
 		if len(pods) < len(oldPods) || len(pods) > config.Replicas {
@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 
 		const ns = "static-pods"
 		numpods := int32(len(nodelist.Items))
-		framework.ExpectNoError(framework.WaitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout, map[string]string{}),
+		framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}),
 			fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods))
 	})
 
@@ -62,9 +62,7 @@ var _ = framework.KubeDescribe("PetSet", func() {
 		// dynamic volume provisioner.
 		framework.SkipUnlessProviderIs("gce")
 
-		var err error
-		c, err = framework.LoadClient()
-		Expect(err).NotTo(HaveOccurred())
+		c = f.Client
 		ns = f.Namespace.Name
 	})
 
@@ -398,7 +398,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 		// the cluster is restored to health.
 		By("waiting for system pods to successfully restart")
 
-		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels)
+		err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
 	})
 
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		}
 	}
 
-	err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
+	err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
 	Expect(err).NotTo(HaveOccurred())
 
 	for _, node := range nodeList.Items {
@@ -69,9 +69,7 @@ var _ = framework.KubeDescribe("Services", func() {
 	var c *client.Client
 
 	BeforeEach(func() {
-		var err error
-		c, err = framework.LoadClient()
-		Expect(err).NotTo(HaveOccurred())
+		c = f.Client
 	})
 
 	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
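After the refactor, call sites reuse whatever client the test framework already holds rather than loading their own. A sketch of the resulting wiring, mirroring the PetSet and Services hunks above (minPods, timeout, and ignoreLabels stand in for whatever the test defines; f is the test's framework instance):

	var c *client.Client

	BeforeEach(func() {
		c = f.Client // reuse the framework's client instead of framework.LoadClient()
	})

	It("has running and ready system pods", func() {
		err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, minPods, timeout, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())
	})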