From b85adbf1fd9131a153141547fc391221532e1406 Mon Sep 17 00:00:00 2001 From: MorrisLaw Date: Fri, 22 Nov 2019 03:32:00 +0000 Subject: [PATCH] moved WriteFileViaContainer and ReadFileViaContainer to kubectl_utils --- test/e2e/apimachinery/crd_publish_openapi.go | 60 ++-- test/e2e/apimachinery/webhook.go | 2 +- test/e2e/apps/statefulset.go | 14 +- test/e2e/auth/BUILD | 10 +- test/e2e/auth/service_accounts.go | 8 +- .../autoscaling/cluster_size_autoscaling.go | 18 +- .../custom_metrics_stackdriver_autoscaling.go | 4 +- test/e2e/cloud/gcp/cluster_upgrade.go | 14 +- test/e2e/examples.go | 10 +- test/e2e/framework/framework.go | 78 ----- test/e2e/framework/ingress/ingress_utils.go | 8 +- test/e2e/framework/kubectl/BUILD | 3 + test/e2e/framework/kubectl/kubectl_utils.go | 89 +++++- test/e2e/framework/network/utils.go | 4 +- test/e2e/framework/nodes_util.go | 22 +- test/e2e/framework/service/resource.go | 2 +- test/e2e/framework/util.go | 49 ++- test/e2e/framework/volume/fixtures.go | 2 +- .../monitoring/custom_metrics_deployments.go | 20 +- .../monitoring/custom_metrics_stackdriver.go | 8 +- test/e2e/kubectl/kubectl.go | 298 +++++++++--------- test/e2e/kubectl/portforward.go | 2 +- test/e2e/network/example_cluster_dns.go | 6 +- test/e2e/network/service.go | 2 +- test/e2e/node/kubelet.go | 4 +- test/e2e/node/security_context.go | 8 +- test/e2e/storage/BUILD | 1 + test/e2e/storage/pd.go | 19 +- test/e2e/storage/testsuites/subpath.go | 2 +- test/e2e/storage/vsphere/vsphere_utils.go | 6 +- test/e2e/upgrades/cassandra.go | 2 +- test/e2e/upgrades/etcd.go | 2 +- test/e2e/upgrades/mysql.go | 2 +- test/e2e/windows/gmsa_full.go | 16 +- test/e2e/windows/gmsa_kubelet.go | 2 +- test/e2e/windows/memory_limits.go | 10 +- 36 files changed, 412 insertions(+), 395 deletions(-) diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index dc7e236ebf5..32fd1973866 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -72,55 +72,55 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("client-side validation (kubectl create and apply) allows request with known and required properties") validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta) - if _, err := framework.RunKubectlInput(validCR, ns, "create", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create valid CR %s: %v", validCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { framework.Failf("failed to delete valid CR: %v", err) } - if _, err := framework.RunKubectlInput(validCR, ns, "apply", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply valid CR %s: %v", validCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { framework.Failf("failed to delete valid CR: %v", err) } ginkgo.By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema") unknownCR := 
fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta) - if _, err := framework.RunKubectlInput(unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) { + if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) { framework.Failf("unexpected no error when creating CR with unknown field: %v", err) } - if _, err := framework.RunKubectlInput(unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) { + if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) { framework.Failf("unexpected no error when applying CR with unknown field: %v", err) } ginkgo.By("client-side validation (kubectl create and apply) rejects request without required properties") noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta) - if _, err := framework.RunKubectlInput(noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) { + if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) { framework.Failf("unexpected no error when creating CR without required field: %v", err) } - if _, err := framework.RunKubectlInput(noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) { + if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) { framework.Failf("unexpected no error when applying CR without required field: %v", err) } ginkgo.By("kubectl explain works to explain CR properties") - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*.*APIVersion defines.*spec.*.*Specification of Foo`); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*.*APIVersion defines.*spec.*.*Specification of Foo`); err != nil { framework.Failf("%v", err) } ginkgo.By("kubectl explain works to explain CR properties recursively") - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*.*CreationTimestamp is a timestamp`); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*.*CreationTimestamp is a timestamp`); err != nil { framework.Failf("%v", err) } - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil { framework.Failf("%v", err) } - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*.*Name of Bar`); err != nil { + if err := 
verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*.*Name of Bar`); err != nil { framework.Failf("%v", err) } ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist") - if _, err := framework.RunKubectl("explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) { + if _, err := framework.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) { framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err) } @@ -147,21 +147,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } ginkgo.By("kubectl explain works to explain CR without validation schema") - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*`); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*`); err != nil { framework.Failf("%v", err) } @@ -188,21 +188,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, 
randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } ginkgo.By("kubectl explain works to explain CR") - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, fmt.Sprintf(`(?s)KIND:.*%s`, crd.Crd.Spec.Names.Kind)); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, fmt.Sprintf(`(?s)KIND:.*%s`, crd.Crd.Spec.Names.Kind)); err != nil { framework.Failf("%v", err) } @@ -230,21 +230,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"spec":{"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } ginkgo.By("kubectl explain works to explain CR") - if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*preserve-unknown-properties in nested field for Testing`); err != nil { + if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*preserve-unknown-properties in nested field for Testing`); err != nil { framework.Failf("%v", err) } @@ -663,8 +663,8 @@ func dropDefaults(s *spec.Schema) { delete(s.Extensions, "x-kubernetes-group-version-kind") } -func verifyKubectlExplain(name, pattern string) error { - result, err := framework.RunKubectl("explain", name) +func verifyKubectlExplain(ns, name, pattern string) error { + result, err := framework.RunKubectl(ns, "explain", name) if err != nil { return fmt.Errorf("failed to explain %s: %v", name, err) } diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index a54d481abc8..3b64949f678 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -1198,7 +1198,7 @@ func testAttachingPodWebhook(f *framework.Framework) { ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") timer := time.NewTimer(30 * time.Second) defer timer.Stop() - _, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", 
"-c=container1").WithTimeout(timer.C).Exec() + _, err = framework.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec() framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook") if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) { framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 5272e7c7486..1a3a6943c6d 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -848,10 +848,10 @@ var _ = SIGDescribe("StatefulSet", func() { }) }) -func kubectlExecWithRetries(args ...string) (out string) { +func kubectlExecWithRetries(ns string, args ...string) (out string) { var err error for i := 0; i < 3; i++ { - if out, err = framework.RunKubectl(args...); err == nil { + if out, err = framework.RunKubectl(ns, args...); err == nil { return } framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) @@ -916,7 +916,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) { ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace) for k, v := range kv { cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v) - framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd)) + framework.Logf(framework.RunKubectlOrDie(z.ss.Namespace, "exec", ns, name, "--", "/bin/sh", "-c", cmd)) } } @@ -924,7 +924,7 @@ func (z *zookeeperTester) read(statefulPodIndex int, key string) string { name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex) ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace) cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key) - return lastLine(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd)) + return lastLine(framework.RunKubectlOrDie(z.ss.Namespace, "exec", ns, name, "--", "/bin/sh", "-c", cmd)) } type mysqlGaleraTester struct { @@ -941,7 +941,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string { // TODO: Find a readiness probe for mysql that guarantees writes will // succeed and ditch retries. Current probe only reads, so there's a window // for a race. 
- return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) + return kubectlExecWithRetries(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) } func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet { @@ -981,7 +981,7 @@ func (m *redisTester) name() string { func (m *redisTester) redisExec(cmd, ns, podName string) string { cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd) - return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) + return framework.RunKubectlOrDie(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) } func (m *redisTester) deploy(ns string) *appsv1.StatefulSet { @@ -1012,7 +1012,7 @@ func (c *cockroachDBTester) name() string { func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string { cmd = fmt.Sprintf("/cockroach/cockroach sql --insecure --host %s.cockroachdb -e \"%v\"", podName, cmd) - return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) + return framework.RunKubectlOrDie(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) } func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { diff --git a/test/e2e/auth/BUILD b/test/e2e/auth/BUILD index 515868770a0..168ed5aab69 100644 --- a/test/e2e/auth/BUILD +++ b/test/e2e/auth/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -19,6 +14,7 @@ go_library( "service_accounts.go", ], importpath = "k8s.io/kubernetes/test/e2e/auth", + visibility = ["//visibility:public"], deps = [ "//pkg/master/ports:go_default_library", "//pkg/security/apparmor:go_default_library", @@ -55,6 +51,7 @@ go_library( "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/job:go_default_library", + "//test/e2e/framework/kubectl:go_default_library", "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/utils:go_default_library", @@ -77,4 +74,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index e6c85900606..5cd6de4514c 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -224,11 +225,12 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) - mountedToken, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name) + mountedToken, err := 
tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)) framework.ExpectNoError(err) - mountedCA, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey)) + mountedCA, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey)) framework.ExpectNoError(err) - mountedNamespace, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey)) + mountedNamespace, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey)) framework.ExpectNoError(err) // CA and namespace should be identical diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index fb3c3c47e99..ad0a6351811 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -220,7 +220,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 0) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet() + installNvidiaDriversDaemonSet(f.Namespace.Name) ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) @@ -247,7 +247,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 1) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet() + installNvidiaDriversDaemonSet(f.Namespace.Name) ginkgo.By("Schedule a single pod which requires GPU") framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) @@ -277,7 +277,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 0) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet() + installNvidiaDriversDaemonSet(f.Namespace.Name) ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) @@ -306,7 +306,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 1) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet() + installNvidiaDriversDaemonSet(f.Namespace.Name) ginkgo.By("Schedule a single pod which requires GPU") framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) @@ -593,7 +593,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { if len(newNodesSet) > 1 { ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet)) klog.Infof("Usually only 1 new node is expected, investigating") - klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json")) + klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) if output, err := exec.Command("gcloud", "compute", "instances", "list", "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil { @@ -997,10 +997,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) -func installNvidiaDriversDaemonSet() { +func installNvidiaDriversDaemonSet(namespace string) { ginkgo.By("Add daemonset which installs nvidia drivers") // the link differs from one in GKE documentation; 
discussed with @mindprince this one should be used - framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml") + framework.RunKubectlOrDie(namespace, "apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml") } func execCmd(args ...string) *exec.Cmd { @@ -1400,8 +1400,8 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface klog.Infof("Too many pods are not ready yet: %v", notready) } klog.Info("Timeout on waiting for pods being ready") - klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces")) - klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json")) + klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces")) + klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) // Some pods are still not running. return fmt.Errorf("Too many pods are still not running: %v", notready) diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index c1f968f7d2a..779579b1bc8 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -258,11 +258,11 @@ func (tc *CustomMetricTestCase) Run() { } defer monitoring.CleanupDescriptors(gcmService, projectID) - err = monitoring.CreateAdapter(monitoring.AdapterDefault) + err = monitoring.CreateAdapter(tc.framework.Namespace.ObjectMeta.Name, monitoring.AdapterDefault) if err != nil { framework.Failf("Failed to set up: %v", err) } - defer monitoring.CleanupAdapter(monitoring.AdapterDefault) + defer monitoring.CleanupAdapter(tc.framework.Namespace.ObjectMeta.Name, monitoring.AdapterDefault) // Run application that exports the metric err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 8cd5fcac578..1807738ca92 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, masterUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) @@ -144,7 +144,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, clusterUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target)) @@ -177,7 +177,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() { target := upgCtx.Versions[1].Version.String() framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage)) 
framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target)) - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) @@ -225,7 +225,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, gpuUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) @@ -243,7 +243,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, gpuUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target)) @@ -265,7 +265,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { target := upgCtx.Versions[1].Version.String() framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target)) - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) @@ -291,7 +291,7 @@ var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", start := time.Now() defer finalizeUpgradeTest(start, statefulUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(framework.MasterUpgrade(target)) + framework.ExpectNoError(framework.MasterUpgrade(f, target)) framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target)) framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target)) diff --git a/test/e2e/examples.go b/test/e2e/examples.go index a129198cae7..1dfee3b7223 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -69,8 +69,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { httpYaml := readFile(test, "http-liveness.yaml.in") nsFlag := fmt.Sprintf("--namespace=%v", ns) - framework.RunKubectlOrDieInput(execYaml, "create", "-f", "-", nsFlag) - framework.RunKubectlOrDieInput(httpYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-", nsFlag) // Since both containers start rapidly, we can easily run this test in parallel. 
var wg sync.WaitGroup @@ -120,8 +120,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { podName := "secret-test-pod" ginkgo.By("creating secret and pod") - framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag) - framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) @@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { podName := "dapi-test-pod" ginkgo.By("creating the pod") - framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 72b3901ac83..61cb2f493ae 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -22,7 +22,6 @@ limitations under the License. package framework import ( - "bytes" "fmt" "io/ioutil" "math/rand" @@ -52,14 +51,12 @@ import ( "github.com/onsi/gomega" // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245) - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) const ( - maxKubectlExecRetries = 5 // DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion. DefaultNamespaceDeletionTimeout = 5 * time.Minute ) @@ -502,35 +499,6 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp) } -// WriteFileViaContainer writes a file using kubectl exec echo > via specified container -// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters -func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error { - ginkgo.By("writing a file in the container") - allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - for _, c := range contents { - if !strings.ContainsRune(allowedCharacters, c) { - return fmt.Errorf("Unsupported character in string to write: %v", c) - } - } - command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path) - stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command) - if err != nil { - Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr)) - } - return err -} - -// ReadFileViaContainer reads a file using kubectl exec cat . 
-func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) { - ginkgo.By("reading a file in the container") - - stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path) - if err != nil { - Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr)) - } - return string(stdout), err -} - // CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once. func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) { var err error @@ -655,52 +623,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster { return nil } -func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) { - for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ { - if numRetries > 0 { - Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries) - } - - stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...) - if err != nil { - if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") { - // Retry on "i/o timeout" errors - Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes)) - continue - } - if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") { - // Retry on "container not found" errors - Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes)) - time.Sleep(2 * time.Second) - continue - } - } - - return stdOutBytes, stdErrBytes, err - } - err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries) - return nil, nil, err -} - -func kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) { - var stdout, stderr bytes.Buffer - cmdArgs := []string{ - "exec", - fmt.Sprintf("--namespace=%v", namespace), - podName, - fmt.Sprintf("-c=%v", containerName), - } - cmdArgs = append(cmdArgs, args...) - - tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath) - cmd := tk.KubectlCmd(cmdArgs...) - cmd.Stdout, cmd.Stderr = &stdout, &stderr - - Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " ")) - err := cmd.Run() - return stdout.Bytes(), stderr.Bytes(), err -} - // KubeDescribe is wrapper function for ginkgo describe. Adds namespacing. // TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401. 
func KubeDescribe(text string, body func()) bool { diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 1c178c346b6..3593cfdc3f3 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -446,10 +446,10 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri } j.Logger.Infof("creating replication controller") - framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) j.Logger.Infof("creating service") - framework.RunKubectlOrDieInput(read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) if len(svcAnnotations) > 0 { svcList, err := j.Client.CoreV1().Services(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) @@ -462,7 +462,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri if exists("secret.yaml") { j.Logger.Infof("creating secret") - framework.RunKubectlOrDieInput(read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) } j.Logger.Infof("Parsing ingress from %v", filepath.Join(manifestPath, "ing.yaml")) @@ -904,7 +904,7 @@ func (cont *NginxIngressController) Init() { return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file))) } framework.Logf("initializing nginx ingress controller") - framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) + framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/framework/kubectl/BUILD b/test/e2e/framework/kubectl/BUILD index d97b35ea1c0..f3ede10d00b 100644 --- a/test/e2e/framework/kubectl/BUILD +++ b/test/e2e/framework/kubectl/BUILD @@ -13,6 +13,9 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/utils:go_default_library", + "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", + "//test/e2e/framework/log:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", ], ) diff --git a/test/e2e/framework/kubectl/kubectl_utils.go b/test/e2e/framework/kubectl/kubectl_utils.go index e46da16fabf..8009bcefbe3 100644 --- a/test/e2e/framework/kubectl/kubectl_utils.go +++ b/test/e2e/framework/kubectl/kubectl_utils.go @@ -17,10 +17,12 @@ limitations under the License. package kubectl import ( + "bytes" "fmt" "os/exec" "path/filepath" "strings" + "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,25 +31,34 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + + "github.com/onsi/ginkgo" ) -// TestKubeconfig is a struct containing the minimum attributes needed to run KubectlCmd. 
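// A minimal sketch of the call pattern callers adopt once these helpers move onto
// TestKubeconfig (mirroring the service_accounts.go hunk above). It assumes the
// e2ekubectl import alias used there; the pod name, container name, file path and
// contents below are hypothetical placeholders, not values taken from this patch.
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host,
	framework.TestContext.KubeConfig, framework.TestContext.KubeContext,
	framework.TestContext.KubectlPath, f.Namespace.Name)
// Contents are restricted to ASCII alphanumerics by WriteFileViaContainer.
if err := tk.WriteFileViaContainer("example-pod", "example-container", "/tmp/data", "foo123"); err != nil {
	framework.Failf("failed to write file via container: %v", err)
}
contents, err := tk.ReadFileViaContainer("example-pod", "example-container", "/tmp/data")
framework.ExpectNoError(err)
framework.Logf("read back: %s", contents)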
+const ( + maxKubectlExecRetries = 5 +) + +// TestKubeconfig is a struct containing the needed attributes from TestContext and Framework(Namespace). type TestKubeconfig struct { CertDir string Host string KubeConfig string KubeContext string KubectlPath string + Namespace string // Every test has at least one namespace unless creation is skipped } // NewTestKubeconfig returns a new Kubeconfig struct instance. -func NewTestKubeconfig(certdir string, host string, kubeconfig string, kubecontext string, kubectlpath string) *TestKubeconfig { +func NewTestKubeconfig(certdir, host, kubeconfig, kubecontext, kubectlpath, namespace string) *TestKubeconfig { return &TestKubeconfig{ CertDir: certdir, Host: host, KubeConfig: kubeconfig, KubeContext: kubecontext, KubectlPath: kubectlpath, + Namespace: namespace, } } @@ -116,3 +127,77 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string } } } + +// WriteFileViaContainer writes a file using kubectl exec echo > via specified container +// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters +func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, path string, contents string) error { + ginkgo.By("writing a file in the container") + allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + for _, c := range contents { + if !strings.ContainsRune(allowedCharacters, c) { + return fmt.Errorf("Unsupported character in string to write: %v", c) + } + } + command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path) + stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command) + if err != nil { + e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr)) + } + return err +} + +// ReadFileViaContainer reads a file using kubectl exec cat . +func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, path string) (string, error) { + ginkgo.By("reading a file in the container") + + stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path) + if err != nil { + e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr)) + } + return string(stdout), err +} + +func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) { + for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ { + if numRetries > 0 { + e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries) + } + + stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...) + if err != nil { + if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") { + // Retry on "i/o timeout" errors + e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes)) + continue + } + if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") { + // Retry on "container not found" errors + e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes)) + time.Sleep(2 * time.Second) + continue + } + } + + return stdOutBytes, stdErrBytes, err + } + err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". 
Giving up", maxKubectlExecRetries) + return nil, nil, err +} + +func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) { + var stdout, stderr bytes.Buffer + cmdArgs := []string{ + "exec", + fmt.Sprintf("--namespace=%v", namespace), + podName, + fmt.Sprintf("-c=%v", containerName), + } + cmdArgs = append(cmdArgs, args...) + + cmd := tk.KubectlCmd(cmdArgs...) + cmd.Stdout, cmd.Stderr = &stdout, &stderr + + e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " ")) + err := cmd.Run() + return stdout.Bytes(), stderr.Bytes(), err +} diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index f3204dfaefb..72b053318bb 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -185,7 +185,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets } framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name) desc, _ := framework.RunKubectl( - "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace)) + e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace)) framework.Logf(desc) } } @@ -423,7 +423,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string) }); pollErr != nil { framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName) desc, _ := framework.RunKubectl( - "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace)) + config.Namespace, "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace)) framework.Logf("%s", desc) framework.Failf("Timed out in %v: %v", retryTimeout, msg) } diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index cd27d62ddee..161f234f891 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -47,12 +47,12 @@ func EtcdUpgrade(targetStorage, targetVersion string) error { } // MasterUpgrade upgrades master node on GCE/GKE. 
-func MasterUpgrade(v string) error { +func MasterUpgrade(f *Framework, v string) error { switch TestContext.Provider { case "gce": return masterUpgradeGCE(v, false) case "gke": - return masterUpgradeGKE(v) + return masterUpgradeGKE(f.Namespace.Name, v) case "kubernetes-anywhere": return masterUpgradeKubernetesAnywhere(v) default: @@ -113,7 +113,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string { return args } -func masterUpgradeGKE(v string) error { +func masterUpgradeGKE(namespace string, v string) error { Logf("Upgrading master to %q", v) args := []string{ "container", @@ -131,7 +131,7 @@ func masterUpgradeGKE(v string) error { return err } - waitForSSHTunnels() + waitForSSHTunnels(namespace) return nil } @@ -181,7 +181,7 @@ func NodeUpgrade(f *Framework, v string, img string) error { case "gce": err = nodeUpgradeGCE(v, img, false) case "gke": - err = nodeUpgradeGKE(v, img) + err = nodeUpgradeGKE(f.Namespace.Name, v, img) default: err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider) } @@ -230,7 +230,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error { return err } -func nodeUpgradeGKE(v string, img string) error { +func nodeUpgradeGKE(namespace string, v string, img string) error { Logf("Upgrading nodes to version %q and image %q", v, img) nps, err := nodePoolsGKE() if err != nil { @@ -258,7 +258,7 @@ func nodeUpgradeGKE(v string, img string) error { return err } - waitForSSHTunnels() + waitForSSHTunnels(namespace) } return nil } @@ -290,18 +290,18 @@ func gceUpgradeScript() string { return TestContext.GCEUpgradeScript } -func waitForSSHTunnels() { +func waitForSSHTunnels(namespace string) { Logf("Waiting for SSH tunnels to establish") - RunKubectl("run", "ssh-tunnel-test", + RunKubectl(namespace, "run", "ssh-tunnel-test", "--image=busybox", "--restart=Never", "--command", "--", "echo", "Hello") - defer RunKubectl("delete", "pod", "ssh-tunnel-test") + defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test") // allow up to a minute for new ssh tunnels to establish wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { - _, err := RunKubectl("logs", "ssh-tunnel-test") + _, err := RunKubectl(namespace, "logs", "ssh-tunnel-test") return err == nil, nil }) } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index ed52f6dbb1a..87c17ac678f 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -105,7 +105,7 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv func DescribeSvc(ns string) { framework.Logf("\nOutput of kubectl describe svc:\n") desc, _ := framework.RunKubectl( - "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) + ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) framework.Logf(desc) } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index ae463ad1abc..1fca4a956e4 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -30,7 +30,6 @@ import ( "os" "os/exec" "path" - "path/filepath" "sort" "strconv" "strings" @@ -587,7 +586,7 @@ func Cleanup(filePath, ns string, selectors ...string) { if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } - RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg) + RunKubectlOrDie(ns, "delete", "--grace-period=0", "-f", filePath, nsArg) AssertCleanup(ns, selectors...) 
} @@ -602,12 +601,12 @@ func AssertCleanup(ns string, selectors ...string) { verifyCleanupFunc := func() (bool, error) { e = nil for _, selector := range selectors { - resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg) + resources := RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers", nsArg) if resources != "" { e = fmt.Errorf("Resources left running after stop:\n%s", resources) return false, nil } - pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") + pods := RunKubectlOrDie(ns, "get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods) return false, nil @@ -629,7 +628,7 @@ func LookForStringInPodExec(ns, podName string, command []string, expectedString // use the first container args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} args = append(args, command...) - return RunKubectlOrDie(args...) + return RunKubectlOrDie(ns, args...) }) } @@ -656,9 +655,9 @@ type KubectlBuilder struct { } // NewKubectlCommand returns a KubectlBuilder for running kubectl. -func NewKubectlCommand(args ...string) *KubectlBuilder { +func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder { b := new(KubectlBuilder) - tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath) + tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace) b.cmd = tk.KubectlCmd(args...) return b } @@ -688,14 +687,14 @@ func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder { } // ExecOrDie runs the kubectl executable or dies if error occurs. -func (b KubectlBuilder) ExecOrDie() string { +func (b KubectlBuilder) ExecOrDie(namespace string) string { str, err := b.Exec() // In case of i/o timeout error, try talking to the apiserver again after 2s before dying. // Note that we're still dying after retrying so that we can get visibility to triage it further. 
if isTimeout(err) { Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.") time.Sleep(2 * time.Second) - retryStr, retryErr := RunKubectl("version") + retryStr, retryErr := RunKubectl(namespace, "version") Logf("stdout: %q", retryStr) Logf("err: %v", retryErr) } @@ -754,23 +753,23 @@ func (b KubectlBuilder) Exec() (string, error) { } // RunKubectlOrDie is a convenience wrapper over kubectlBuilder -func RunKubectlOrDie(args ...string) string { - return NewKubectlCommand(args...).ExecOrDie() +func RunKubectlOrDie(namespace string, args ...string) string { + return NewKubectlCommand(namespace, args...).ExecOrDie(namespace) } // RunKubectl is a convenience wrapper over kubectlBuilder -func RunKubectl(args ...string) (string, error) { - return NewKubectlCommand(args...).Exec() +func RunKubectl(namespace string, args ...string) (string, error) { + return NewKubectlCommand(namespace, args...).Exec() } // RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin -func RunKubectlOrDieInput(data string, args ...string) string { - return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie() +func RunKubectlOrDieInput(namespace string, data string, args ...string) string { + return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace) } // RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin -func RunKubectlInput(data string, args ...string) (string, error) { - return NewKubectlCommand(args...).WithStdinData(data).Exec() +func RunKubectlInput(namespace string, data string, args ...string) (string, error) { + return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec() } // RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd @@ -1258,7 +1257,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. func RunHostCmd(ns, name, cmd string) (string, error) { - return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd) + return RunKubectl(ns, "exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd) } // RunHostCmdOrDie calls RunHostCmd and dies on error. @@ -1381,7 +1380,7 @@ func RestartKubelet(host string) error { } // RestartApiserver restarts the kube-apiserver. -func RestartApiserver(cs clientset.Interface) error { +func RestartApiserver(namespace string, cs clientset.Interface) error { // TODO: Make it work for all providers. 
if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider for RestartApiserver: %s", TestContext.Provider) @@ -1402,7 +1401,7 @@ func RestartApiserver(cs clientset.Interface) error { if err != nil { return err } - return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v' + return masterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v' } func sshRestartMaster() error { @@ -1546,7 +1545,7 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st // LookForStringInLog looks for the given string in the log of a specific pod container func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { - return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns)) + return RunKubectlOrDie(ns, "logs", podName, container, fmt.Sprintf("--namespace=%v", ns)) }) } @@ -1804,7 +1803,7 @@ func GetAllMasterAddresses(c clientset.Interface) []string { func DescribeIng(ns string) { Logf("\nOutput of kubectl describe ing:\n") desc, _ := RunKubectl( - "describe", "ing", fmt.Sprintf("--namespace=%v", ns)) + ns, "describe", "ing", fmt.Sprintf("--namespace=%v", ns)) Logf(desc) } @@ -1851,7 +1850,7 @@ func (f *Framework) NewAgnhostPod(name string, args ...string) *v1.Pod { // CreateEmptyFileOnPod creates empty file at given path on the pod. // TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored. func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error { - _, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath)) + _, err := RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath)) return err } @@ -1859,10 +1858,10 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err func DumpDebugInfo(c clientset.Interface, ns string) { sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { - desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) + desc, _ := RunKubectl(ns, "describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) - l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100") + l, _ := RunKubectl(ns, "logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100") Logf("\nLast 100 log lines of %v:\n%v", s.Name, l) } } diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 91fffc7f556..5f505670088 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -614,7 +614,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs fileName := fmt.Sprintf("/opt/%d/%s", i, test.File) commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...) } - out, err := framework.RunKubectl(commands...) + out, err := framework.RunKubectl(injectorPod.Namespace, commands...) 
framework.ExpectNoError(err, "failed: writing the contents: %s", out) } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 5390299828b..1d3fb91fcf1 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -251,11 +251,11 @@ func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) // CreateAdapter creates Custom Metrics - Stackdriver adapter // adapterDeploymentFile should be a filename for adapter deployment located in StagingDeploymentLocation -func CreateAdapter(adapterDeploymentFile string) error { +func CreateAdapter(namespace, adapterDeploymentFile string) error { // A workaround to make the work on GKE. GKE doesn't normally allow to create cluster roles, // which the adapter deployment does. The solution is to create cluster role binding for // cluster-admin role and currently used service account. - err := createClusterAdminBinding() + err := createClusterAdminBinding(namespace) if err != nil { return err } @@ -264,12 +264,12 @@ func CreateAdapter(adapterDeploymentFile string) error { if err != nil { return err } - stat, err := framework.RunKubectl("create", "-f", adapterURL) + stat, err := framework.RunKubectl(namespace, "create", "-f", adapterURL) framework.Logf(stat) return err } -func createClusterAdminBinding() error { +func createClusterAdminBinding(namespace string) error { stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account") if err != nil { framework.Logf(stderr) @@ -277,7 +277,7 @@ func createClusterAdminBinding() error { } serviceAccount := strings.TrimSpace(stdout) framework.Logf("current service account: %q", serviceAccount) - stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount) + stat, err := framework.RunKubectl(namespace, "create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount) framework.Logf(stat) return err } @@ -316,8 +316,8 @@ func CleanupDescriptors(service *gcm.Service, projectID string) { } // CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments. 
-func CleanupAdapter(adapterDeploymentFile string) { - stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile) +func CleanupAdapter(namespace, adapterDeploymentFile string) { + stat, err := framework.RunKubectl(namespace, "delete", "-f", adapterDeploymentFile) framework.Logf(stat) if err != nil { framework.Logf("Failed to delete adapter deployments: %s", err) @@ -326,11 +326,11 @@ func CleanupAdapter(adapterDeploymentFile string) { if err != nil { framework.Logf("Failed to delete adapter deployment file: %s", err) } - cleanupClusterAdminBinding() + cleanupClusterAdminBinding(namespace) } -func cleanupClusterAdminBinding() { - stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding) +func cleanupClusterAdminBinding(namespace string) { + stat, err := framework.RunKubectl(namespace, "delete", "clusterrolebinding", ClusterAdminBinding) framework.Logf(stat) if err != nil { framework.Logf("Failed to delete cluster admin binding: %s", err) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index fe056e90f46..70ecb179fa0 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -112,11 +112,11 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c } defer CleanupDescriptors(gcmService, projectID) - err = CreateAdapter(adapterDeployment) + err = CreateAdapter(f.Namespace.Name, adapterDeployment) if err != nil { framework.Failf("Failed to set up: %s", err) } - defer CleanupAdapter(adapterDeployment) + defer CleanupAdapter(f.Namespace.Name, adapterDeployment) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) if err != nil { @@ -159,11 +159,11 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, defer CleanupDescriptors(gcmService, projectID) // Both deployments - for old and new resource model - expose External Metrics API. - err = CreateAdapter(AdapterForOldResourceModel) + err = CreateAdapter(f.Namespace.Name, AdapterForOldResourceModel) if err != nil { framework.Failf("Failed to set up: %s", err) } - defer CleanupAdapter(AdapterForOldResourceModel) + defer CleanupAdapter(f.Namespace.Name, AdapterForOldResourceModel) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) if err != nil { diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 930e3bddb4d..52d395f43c0 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -182,7 +182,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { } // support backward compatibility : file paths or raw json - since we are removing file path // dependencies from this test. - framework.RunKubectlOrDieInput(fileContents, "delete", "--grace-period=0", "--force", "-f", "-", nsArg) + framework.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-", nsArg) framework.AssertCleanup(ns, selectors...) } @@ -190,11 +190,11 @@ func readTestFileOrDie(file string) []byte { return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file)) } -func runKubectlRetryOrDie(args ...string) string { +func runKubectlRetryOrDie(ns string, args ...string) string { var err error var output string for i := 0; i < 5; i++ { - output, err = framework.RunKubectl(args...) + output, err = framework.RunKubectl(ns, args...) 
if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) { break } @@ -229,14 +229,14 @@ var _ = SIGDescribe("Kubectl alpha client", func() { }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "cronjobs", cjName, nsFlag) }) ginkgo.It("should create a CronJob", func() { framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceAlpha, f.Namespace.Name) schedule := "*/5 * * * ?" - framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1", + framework.RunKubectlOrDie(ns, "run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) ginkgo.By("verifying the CronJob " + cjName + " was created") sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) @@ -341,7 +341,7 @@ var _ = SIGDescribe("Kubectl client", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") - framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) @@ -354,15 +354,15 @@ var _ = SIGDescribe("Kubectl client", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") - framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling down the replication controller") debugDiscovery() - framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling up the replication controller") debugDiscovery() - framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) @@ -373,11 +373,11 @@ var _ = SIGDescribe("Kubectl client", func() { */ framework.ConformanceIt("should do a rolling update of a replication controller ", func() { ginkgo.By("creating the initial replication controller") - framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("rolling-update to new replication controller") debugDiscovery() - framework.RunKubectlOrDieInput(string(kitten[:]), 
"rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) validateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) // Everything will hopefully be cleaned up when the namespace is deleted. }) @@ -411,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("creating all guestbook components") forEachGBFile(func(contents string) { framework.Logf(contents) - framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) }) ginkgo.By("validating guestbook app") @@ -495,7 +495,7 @@ var _ = SIGDescribe("Kubectl client", func() { obj.SetName(obj.GetName() + randString) } - createObjValidateOutputAndCleanup(client, obj, resource) + createObjValidateOutputAndCleanup(f.Namespace.Name, client, obj, resource) } } }) @@ -506,7 +506,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.BeforeEach(func() { ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) - framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { @@ -515,7 +515,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should support exec", func() { ginkgo.By("executing a command in the container") - execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") + execOutput := framework.RunKubectlOrDie(ns, "exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } @@ -525,13 +525,13 @@ var _ = SIGDescribe("Kubectl client", func() { for i := 0; i < len(veryLongData); i++ { veryLongData[i] = 'a' } - execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData)) + execOutput = framework.RunKubectlOrDie(ns, "exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData)) framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output") ginkgo.By("executing a command in the container with noninteractive stdin") - execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). + execOutput = framework.NewKubectlCommand(ns, "exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). WithStdinData("abcd1234"). - ExecOrDie() + ExecOrDie(ns) if e, a := "abcd1234", execOutput; e != a { framework.Failf("Unexpected kubectl exec output. 
Wanted %q, got %q", e, a) } @@ -545,9 +545,9 @@ var _ = SIGDescribe("Kubectl client", func() { defer closer.Close() ginkgo.By("executing a command in the container with pseudo-interactive stdin") - execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh"). + execOutput = framework.NewKubectlCommand(ns, "exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh"). WithStdinReader(r). - ExecOrDie() + ExecOrDie(ns) if e, a := "hi", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } @@ -555,7 +555,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should support exec using resource/name", func() { ginkgo.By("executing a command in the container") - execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container") + execOutput := framework.RunKubectlOrDie(ns, "exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } @@ -575,9 +575,9 @@ var _ = SIGDescribe("Kubectl client", func() { for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} { proxyLogs.Reset() ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar) - output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container"). + output := framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container"). WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))). 
- ExecOrDie() + ExecOrDie(ns) // Verify we got the normal output captured by the exec server expectedExecOutput := "running in container\n" @@ -602,7 +602,7 @@ var _ = SIGDescribe("Kubectl client", func() { } ginkgo.By("Starting kubectl proxy") - port, proxyCmd, err := startProxyServer() + port, proxyCmd, err := startProxyServer(ns) framework.ExpectNoError(err) defer framework.TryKill(proxyCmd) @@ -610,9 +610,9 @@ var _ = SIGDescribe("Kubectl client", func() { host := fmt.Sprintf("--server=http://127.0.0.1:%d", port) ginkgo.By("Running kubectl via kubectl proxy using " + host) output := framework.NewKubectlCommand( - host, fmt.Sprintf("--namespace=%s", ns), + ns, host, fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container", - ).ExecOrDie() + ).ExecOrDie(ns) // Verify we got the normal output captured by the exec server expectedExecOutput := "running in container\n" @@ -625,40 +625,40 @@ var _ = SIGDescribe("Kubectl client", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) ginkgo.By("execing into a container with a successful command") - _, err := framework.NewKubectlCommand(nsFlag, "exec", "httpd", "--", "/bin/sh", "-c", "exit 0").Exec() + _, err := framework.NewKubectlCommand(ns, nsFlag, "exec", "httpd", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) ginkgo.By("execing into a container with a failing command") - _, err = framework.NewKubectlCommand(nsFlag, "exec", "httpd", "--", "/bin/sh", "-c", "exit 42").Exec() + _, err = framework.NewKubectlCommand(ns, nsFlag, "exec", "httpd", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) framework.ExpectEqual(ok, true) framework.ExpectEqual(ee.ExitStatus(), 42) ginkgo.By("running a successful command") - _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec() + _, err = framework.NewKubectlCommand(ns, nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) ginkgo.By("running a failing command") - _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() + _, err = framework.NewKubectlCommand(ns, nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok = err.(uexec.ExitError) framework.ExpectEqual(ok, true) framework.ExpectEqual(ee.ExitStatus(), 42) ginkgo.By("running a failing command without --restart=Never") - _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). + _, err = framework.NewKubectlCommand(ns, nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) ginkgo.By("running a failing command without --restart=Never, but with --rm") - _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). + _, err = framework.NewKubectlCommand(ns, nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). 
Exec() framework.ExpectNoError(err) e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) ginkgo.By("running a failing command with --leave-stdin-open") - _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). + _, err = framework.NewKubectlCommand(ns, nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) @@ -669,9 +669,9 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command with run and attach with stdin") // We wait for a non-empty line so we know kubectl has attached - runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "while [ -z \"$s\" ]; do read s; sleep 1; done; echo read:$s && cat && echo 'stdin closed'"). + runOutput := framework.NewKubectlCommand(ns, nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "while [ -z \"$s\" ]; do read s; sleep 1; done; echo read:$s && cat && echo 'stdin closed'"). WithStdinData("value\nabcd1234"). - ExecOrDie() + ExecOrDie(ns) gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value")) gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) @@ -679,17 +679,17 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach without stdin") - runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). + runOutput = framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). - ExecOrDie() + ExecOrDie(ns) gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") - runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + runOutput = framework.NewKubectlCommand(ns, nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234\n"). 
- ExecOrDie() + ExecOrDie(ns) gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed")) g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) @@ -704,7 +704,7 @@ var _ = SIGDescribe("Kubectl client", func() { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } - logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) + logOutput := framework.RunKubectlOrDie(ns, nsFlag, "logs", runTestPod.Name) gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) @@ -718,13 +718,13 @@ var _ = SIGDescribe("Kubectl client", func() { podName := "run-log-test" ginkgo.By("executing a command with run") - framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") + framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) { framework.Failf("Pod for run-log-test was not ready") } - logOutput := framework.RunKubectlOrDie(nsFlag, "logs", "-f", "run-log-test") + logOutput := framework.RunKubectlOrDie(ns, nsFlag, "logs", "-f", "run-log-test") gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF")) }) @@ -772,7 +772,7 @@ var _ = SIGDescribe("Kubectl client", func() { inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT")) inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort) framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName) - framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/") + framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/") // Build a kubeconfig file that will make use of the injected ca and token, // but point at the DNS host and the default namespace @@ -802,7 +802,7 @@ users: tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token `), os.FileMode(0755))) framework.Logf("copying override kubeconfig to the %s pod", simplePodName) - framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/") + framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/") framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(` kind: ConfigMap @@ -818,8 +818,8 @@ metadata: name: "configmap without namespace and invalid name" `), os.FileMode(0755))) framework.Logf("copying configmap manifests to the %s pod", simplePodName) - framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") - framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + 
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") ginkgo.By("getting pods with in-cluster configs") execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1") @@ -878,7 +878,7 @@ metadata: */ framework.ConformanceIt("should check if v1 is in available api versions ", func() { ginkgo.By("validating api versions") - output := framework.RunKubectlOrDie("api-versions") + output := framework.RunKubectlOrDie(ns, "api-versions") if !strings.Contains(output, "v1") { framework.Failf("No v1 in kubectl api-versions") } @@ -888,12 +888,12 @@ metadata: ginkgo.Describe("Kubectl get componentstatuses", func() { ginkgo.It("should get componentstatuses", func() { ginkgo.By("getting list of componentstatuses") - output := framework.RunKubectlOrDie("get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") + output := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") components := strings.Split(output, " ") ginkgo.By("getting details of componentstatuses") for _, component := range components { ginkgo.By("getting status of " + component) - framework.RunKubectlOrDie("get", "componentstatuses", component) + framework.RunKubectlOrDie(ns, "get", "componentstatuses", component) } }) }) @@ -904,12 +904,12 @@ metadata: nsFlag := fmt.Sprintf("--namespace=%v", ns) ginkgo.By("creating Agnhost RC") - framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-", nsFlag) ginkgo.By("applying a modified configuration") stdin := modifyReplicationControllerConfiguration(controllerJSON) - framework.NewKubectlCommand("apply", "-f", "-", nsFlag). + framework.NewKubectlCommand(ns, "apply", "-f", "-", nsFlag). WithStdinReader(stdin). 
- ExecOrDie() + ExecOrDie(ns) ginkgo.By("checking the result") forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration) }) @@ -918,16 +918,16 @@ metadata: nsFlag := fmt.Sprintf("--namespace=%v", ns) ginkgo.By("creating Agnhost SVC") - framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-", nsFlag) ginkgo.By("getting the original port") - originalNodePort := framework.RunKubectlOrDie("get", "service", "agnhost-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") + originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") ginkgo.By("applying the same configuration") - framework.RunKubectlOrDieInput(string(serviceJSON[:]), "apply", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-", nsFlag) ginkgo.By("getting the port after applying configuration") - currentNodePort := framework.RunKubectlOrDie("get", "service", "agnhost-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") + currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") ginkgo.By("checking the result") if originalNodePort != currentNodePort { @@ -942,20 +942,20 @@ metadata: nsFlag := fmt.Sprintf("--namespace=%v", ns) ginkgo.By("deployment replicas number is 2") - framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-", nsFlag) ginkgo.By("check the last-applied matches expectations annotations") - output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") + output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString := "\"replicas\": 2" if !strings.Contains(output, requiredString) { framework.Failf("Missing %s in kubectl view-last-applied", requiredString) } ginkgo.By("apply file doesn't have replicas") - framework.RunKubectlOrDieInput(deployment2Yaml, "apply", "set-last-applied", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-", nsFlag) ginkgo.By("check last-applied has been updated, annotations doesn't have replicas") - output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") + output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) @@ -964,13 +964,13 @@ metadata: ginkgo.By("scale set replicas to 3") httpdDeploy := "httpd-deployment" debugDiscovery() - framework.RunKubectlOrDie("scale", "deployment", httpdDeploy, "--replicas=3", nsFlag) + framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3", nsFlag) ginkgo.By("apply file doesn't have replicas but image changed") - framework.RunKubectlOrDieInput(deployment3Yaml, "apply", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-", nsFlag) ginkgo.By("verify replicas still is 3 and image has been updated") - output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", 
"json") + output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json") requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)} for _, item := range requiredItems { if !strings.Contains(output, item) { @@ -1114,7 +1114,7 @@ metadata: */ framework.ConformanceIt("should check if Kubernetes master services is included in cluster-info ", func() { ginkgo.By("validating cluster-info") - output := framework.RunKubectlOrDie("cluster-info") + output := framework.RunKubectlOrDie(ns, "cluster-info") // Can't check exact strings due to terminal control commands (colors) requiredItems := []string{"Kubernetes master", "is running at"} for _, item := range requiredItems { @@ -1128,7 +1128,7 @@ metadata: ginkgo.Describe("Kubectl cluster-info dump", func() { ginkgo.It("should check if cluster-info dump succeeds", func() { ginkgo.By("running cluster-info dump") - framework.RunKubectlOrDie("cluster-info", "dump") + framework.RunKubectlOrDie(ns, "cluster-info", "dump") }) }) @@ -1143,15 +1143,15 @@ metadata: serviceJSON := readTestFileOrDie(agnhostServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) - framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) - framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-", nsFlag) ginkgo.By("Waiting for Agnhost master to start.") waitForOrFailWithDebug(1) // Pod forEachPod(func(pod v1.Pod) { - output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag) + output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name, nsFlag) requiredStrings := [][]string{ {"Name:", "agnhost-master-"}, {"Namespace:", ns}, @@ -1182,10 +1182,10 @@ metadata: {"Pod Template:"}, {"Image:", agnhostImage}, {"Events:"}} - checkKubectlOutputWithRetry(requiredStrings, "describe", "rc", "agnhost-master", nsFlag) + checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-master", nsFlag) // Service - output := framework.RunKubectlOrDie("describe", "service", "agnhost-master", nsFlag) + output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-master", nsFlag) requiredStrings = [][]string{ {"Name:", "agnhost-master"}, {"Namespace:", ns}, @@ -1205,7 +1205,7 @@ metadata: nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) node := nodes.Items[0] - output = framework.RunKubectlOrDie("describe", "node", node.Name) + output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name) requiredStrings = [][]string{ {"Name:", node.Name}, {"Labels:"}, @@ -1225,7 +1225,7 @@ metadata: checkOutput(output, requiredStrings) // Namespace - output = framework.RunKubectlOrDie("describe", "namespace", ns) + output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns) requiredStrings = [][]string{ {"Name:", ns}, {"Labels:"}, @@ -1240,7 +1240,7 @@ metadata: ginkgo.By("creating a cronjob") nsFlag := fmt.Sprintf("--namespace=%v", ns) cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml"))) - framework.RunKubectlOrDieInput(cronjobYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-", nsFlag) ginkgo.By("waiting for cronjob to start.") err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { @@ -1253,7 +1253,7 @@ metadata: 
framework.ExpectNoError(err) ginkgo.By("verifying kubectl describe prints") - output := framework.RunKubectlOrDie("describe", "cronjob", "cronjob-test", nsFlag) + output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test", nsFlag) requiredStrings := [][]string{ {"Name:", "cronjob-test"}, {"Namespace:", ns}, @@ -1288,7 +1288,7 @@ metadata: ginkgo.By("creating Agnhost RC") framework.Logf("namespace %v", ns) - framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-", nsFlag) // It may take a while for the pods to get registered in some cases, wait to be sure. ginkgo.By("Waiting for Agnhost master to start.") @@ -1346,12 +1346,12 @@ metadata: } ginkgo.By("exposing RC") - framework.RunKubectlOrDie("expose", "rc", "agnhost-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort), nsFlag) + framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort), nsFlag) framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm2", 1234, framework.ServiceStartTimeout) ginkgo.By("exposing service") - framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort), nsFlag) + framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort), nsFlag) framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm3", 2345, framework.ServiceStartTimeout) }) @@ -1364,7 +1364,7 @@ metadata: ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) - framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { @@ -1381,17 +1381,17 @@ metadata: labelValue := "testing-label-value" ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod") - framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag) + framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue, nsFlag) ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) - output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) + output := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName, nsFlag) if !strings.Contains(output, labelValue) { framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) } ginkgo.By("removing the label " + labelName + " of a pod") - framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag) + framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-", nsFlag) ginkgo.By("verifying the pod doesn't have the label " + labelName) - output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) + output = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName, nsFlag) if strings.Contains(output, labelValue) { framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) } 
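// Illustrative sketch of the stdin-driven form of the same convention, as used by the apply
// tests above: NewKubectlCommand now takes the namespace first, and ExecOrDie is handed the
// same namespace. The helper name and the manifest parameter are hypothetical.
func applyManifestInNamespace(ns, manifest string) string {
	nsFlag := fmt.Sprintf("--namespace=%v", ns)
	// The manifest is fed on stdin via "-f -", exactly as the kubectl apply tests do.
	return framework.NewKubectlCommand(ns, "apply", "-f", "-", nsFlag).
		WithStdinData(manifest).
		ExecOrDie(ns)
}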
@@ -1405,7 +1405,7 @@ metadata: ginkgo.By("creating the pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) - framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { @@ -1426,7 +1426,7 @@ metadata: } ginkgo.By("specifying a remote filepath " + podSource + " on the pod") - framework.RunKubectlOrDie("cp", podSource, tempDestination.Name(), nsFlag) + framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name(), nsFlag) ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) localData, err := ioutil.ReadAll(tempDestination) if err != nil { @@ -1446,10 +1446,10 @@ metadata: ginkgo.By("creating an pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) // Agnhost image generates logs for a total of 100 lines over 20s. - framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+agnhostImage, nsFlag, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s") + framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+agnhostImage, nsFlag, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s") }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "pod", podName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "pod", podName, nsFlag) }) /* @@ -1480,19 +1480,19 @@ metadata: framework.ExpectNoError(err) ginkgo.By("limiting log lines") - out := framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1") + out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, nsFlag, "--tail=1") framework.Logf("got output %q", out) gomega.Expect(len(out)).NotTo(gomega.BeZero()) framework.ExpectEqual(len(lines(out)), 1) ginkgo.By("limiting log bytes") - out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--limit-bytes=1") + out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, nsFlag, "--limit-bytes=1") framework.Logf("got output %q", out) framework.ExpectEqual(len(lines(out)), 1) framework.ExpectEqual(len(out), 1) ginkgo.By("exposing timestamps") - out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1", "--timestamps") + out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, nsFlag, "--tail=1", "--timestamps") framework.Logf("got output %q", out) l := lines(out) framework.ExpectEqual(len(l), 1) @@ -1509,9 +1509,9 @@ metadata: // because the granularity is only 1 second and // it could end up rounding the wrong way. 
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s - recentOut := framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--since=1s") + recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, nsFlag, "--since=1s") recent := len(strings.Split(recentOut, "\n")) - olderOut := framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--since=24h") + olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, nsFlag, "--since=24h") older := len(strings.Split(olderOut, "\n")) gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut) }) @@ -1527,12 +1527,12 @@ metadata: controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) ginkgo.By("creating Agnhost RC") - framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-", nsFlag) ginkgo.By("Waiting for Agnhost master to start.") waitForOrFailWithDebug(1) ginkgo.By("patching all pods") forEachPod(func(pod v1.Pod) { - framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") + framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) ginkgo.By("checking annotations") @@ -1558,7 +1558,7 @@ metadata: Description: The command ‘kubectl version’ MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to. */ framework.ConformanceIt("should check is all data is printed ", func() { - version := framework.RunKubectlOrDie("version") + version := framework.RunKubectlOrDie(ns, "version") requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} for _, item := range requiredItems { if !strings.Contains(version, item) { @@ -1577,7 +1577,7 @@ metadata: ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) name = "e2e-test-httpd-deployment" - cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) } + cleanUp = func() { framework.RunKubectlOrDie(ns, "delete", "deployment", name, nsFlag) } }) ginkgo.AfterEach(func() { @@ -1591,7 +1591,7 @@ metadata: */ framework.ConformanceIt("should create an rc or deployment from an image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", name, "--image="+httpdImage, nsFlag) + framework.RunKubectlOrDie(ns, "run", name, "--image="+httpdImage, nsFlag) ginkgo.By("verifying the pod controlled by " + name + " gets created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) @@ -1600,7 +1600,7 @@ metadata: } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { - framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag) framework.Failf("Failed creating 1 pod with expected image %s. 
Number of pods = %v", httpdImage, len(pods)) } }) @@ -1616,7 +1616,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "rc", rcName, nsFlag) }) /* @@ -1626,7 +1626,7 @@ metadata: */ framework.ConformanceIt("should create an rc from an image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag) + framework.RunKubectlOrDie(ns, "run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag) ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { @@ -1645,7 +1645,7 @@ metadata: } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { - framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag) framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods)) } @@ -1657,7 +1657,7 @@ metadata: if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { framework.Failf("Pods for rc %s were not ready", rcName) } - _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) + _, err = framework.RunKubectl(ns, "logs", "rc/"+rcName, nsFlag) // a non-nil error is fine as long as we actually found a pod. if err != nil && !strings.Contains(err.Error(), " in pod ") { framework.Failf("Failed getting logs by rc %s: %v", rcName, err) @@ -1677,7 +1677,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "rc", rcName, nsFlag) }) /* @@ -1687,7 +1687,7 @@ metadata: */ framework.ConformanceIt("should support rolling-update to same image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag) + framework.RunKubectlOrDie(ns, "run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag) ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { @@ -1702,7 +1702,7 @@ metadata: ginkgo.By("rolling-update to same image controller") debugDiscovery() - runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+httpdImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag) + runKubectlRetryOrDie(ns, "rolling-update", rcName, "--update-period=1s", "--image="+httpdImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag) validateController(c, httpdImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns) }) }) @@ -1718,7 +1718,7 @@ metadata: ginkgo.AfterEach(func() { err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { - out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag) + out, err := framework.RunKubectl(ns, "delete", "deployment", dName, nsFlag) if err != nil { if strings.Contains(err.Error(), "could not find default credentials") { err = nil @@ -1737,7 +1737,7 @@ metadata: */ framework.ConformanceIt("should create a deployment from an image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", dName, "--image="+httpdImage, "--generator=deployment/apps.v1", nsFlag) + framework.RunKubectlOrDie(ns, "run", dName, 
"--image="+httpdImage, "--generator=deployment/apps.v1", nsFlag) ginkgo.By("verifying the deployment " + dName + " was created") d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) if err != nil { @@ -1756,7 +1756,7 @@ metadata: } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { - framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag) framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods)) } }) @@ -1772,7 +1772,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "jobs", jobName, nsFlag) }) /* @@ -1782,7 +1782,7 @@ metadata: */ framework.ConformanceIt("should create a job from an image when restart is OnFailure ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+httpdImage, nsFlag) + framework.RunKubectlOrDie(ns, "run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+httpdImage, nsFlag) ginkgo.By("verifying the job " + jobName + " was created") job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) if err != nil { @@ -1808,14 +1808,14 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "cronjobs", cjName, nsFlag) }) ginkgo.It("should create a CronJob", func() { framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceBeta, f.Namespace.Name) schedule := "*/5 * * * ?" - framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1", + framework.RunKubectlOrDie(ns, "run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) ginkgo.By("verifying the CronJob " + cjName + " was created") cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) @@ -1845,7 +1845,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "pods", podName, nsFlag) }) /* @@ -1855,7 +1855,7 @@ metadata: */ framework.ConformanceIt("should create a pod from an image when restart is Never ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+httpdImage, nsFlag) + framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+httpdImage, nsFlag) ginkgo.By("verifying the pod " + podName + " was created") pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { @@ -1881,7 +1881,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) + framework.RunKubectlOrDie(ns, "delete", "pods", podName, nsFlag) }) /* @@ -1891,7 +1891,7 @@ metadata: */ framework.ConformanceIt("should update a single-container pod's image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+httpdImage, "--labels=run="+podName, nsFlag) + framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+httpdImage, "--labels=run="+podName, nsFlag) ginkgo.By("verifying 
the pod " + podName + " is running") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) @@ -1901,14 +1901,14 @@ metadata: } ginkgo.By("verifying the pod " + podName + " was created") - podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") + podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, nsFlag, "-o", "json") if !strings.Contains(podJSON, podName) { framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) } ginkgo.By("replace the image in the pod") podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1) - framework.RunKubectlOrDieInput(podJSON, "replace", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-", nsFlag) ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) @@ -1936,10 +1936,10 @@ metadata: ginkgo.By("executing a command with run --rm and attach with stdin") t := time.NewTimer(runJobTimeout) defer t.Stop() - runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + runOutput := framework.NewKubectlCommand(ns, nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). WithTimeout(t.C). - ExecOrDie() + ExecOrDie(ns) gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) @@ -1962,7 +1962,7 @@ metadata: */ framework.ConformanceIt("should support proxy with --port 0 ", func() { ginkgo.By("starting the proxy server") - port, cmd, err := startProxyServer() + port, cmd, err := startProxyServer(ns) if cmd != nil { defer framework.TryKill(cmd) } @@ -1994,7 +1994,7 @@ metadata: path := filepath.Join(tmpdir, "test") defer os.Remove(path) defer os.Remove(tmpdir) - tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { @@ -2028,11 +2028,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") - runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) + runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) ginkgo.By("verifying the node has the taint " + testTaint.ToString()) - output := runKubectlRetryOrDie("describe", "node", nodeName) + output := runKubectlRetryOrDie(ns, "describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, {"Taints:"}, @@ -2041,9 +2041,9 @@ metadata: checkOutput(output, requiredStrings) ginkgo.By("removing the taint " + testTaint.ToString() + " of a node") - runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-") + runKubectlRetryOrDie(ns, 
"taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-") ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) - output = runKubectlRetryOrDie("describe", "node", nodeName) + output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) } @@ -2059,11 +2059,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") - runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) + runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) ginkgo.By("verifying the node has the taint " + testTaint.ToString()) - output := runKubectlRetryOrDie("describe", "node", nodeName) + output := runKubectlRetryOrDie(ns, "describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, {"Taints:"}, @@ -2077,11 +2077,11 @@ metadata: Effect: v1.TaintEffectPreferNoSchedule, } ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node") - runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString()) + runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint) ginkgo.By("verifying the node has the taint " + newTestTaint.ToString()) - output = runKubectlRetryOrDie("describe", "node", nodeName) + output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, {"Taints:"}, @@ -2095,11 +2095,11 @@ metadata: Effect: v1.TaintEffectNoExecute, } ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") - runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString()) + runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint) ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString()) - output = runKubectlRetryOrDie("describe", "node", nodeName) + output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, {"Taints:"}, @@ -2108,9 +2108,9 @@ metadata: checkOutput(output, requiredStrings) ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node") - runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-") + runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-") ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) - output = runKubectlRetryOrDie("describe", "node", nodeName) + output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) } @@ -2123,7 +2123,7 @@ metadata: quotaName := "million" ginkgo.By("calling kubectl quota") - framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) + framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) @@ -2152,7 +2152,7 @@ metadata: quotaName := "scopes" ginkgo.By("calling kubectl quota") - 
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) + framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) @@ -2180,7 +2180,7 @@ metadata: quotaName := "scopes" ginkgo.By("calling kubectl quota") - out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) + out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) if err == nil { framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) } @@ -2215,10 +2215,10 @@ func checkOutput(output string, required [][]string) { } } -func checkKubectlOutputWithRetry(required [][]string, args ...string) { +func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) { var pollErr error wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - output := framework.RunKubectlOrDie(args...) + output := framework.RunKubectlOrDie(namespace, args...) err := checkOutputReturnError(output, required) if err != nil { pollErr = err @@ -2249,9 +2249,9 @@ func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) { return &apiVersions, nil } -func startProxyServer() (int, *exec.Cmd, error) { +func startProxyServer(ns string) (int, *exec.Cmd, error) { // Specifying port 0 indicates we want the os to pick a random port. - tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter") stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { @@ -2491,17 +2491,17 @@ func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) { func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error { ns := fmt.Sprintf("--namespace=%v", namespace) ginkgo.By("successfully create CR") - if _, err := framework.RunKubectlInput(resource, ns, "create", "--validate=true", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(namespace, resource, ns, "create", "--validate=true", "-f", "-"); err != nil { return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, ns, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { + if _, err := framework.RunKubectl(namespace, ns, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { return fmt.Errorf("failed to delete CR %s: %v", name, err) } ginkgo.By("successfully apply CR") - if _, err := framework.RunKubectlInput(resource, ns, "apply", "--validate=true", "-f", "-"); err != nil { + if _, err := framework.RunKubectlInput(namespace, resource, ns, "apply", "--validate=true", "-f", "-"); err != nil { return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, ns, err) } - if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { + if _, err := framework.RunKubectl(namespace, ns, "delete", crd.Crd.Spec.Names.Plural, name); err != 
nil { return fmt.Errorf("failed to delete CR %s: %v", name, err) } return nil @@ -2536,7 +2536,7 @@ func validateController(c clientset.Interface, containerImage string, replicas i ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector waitLoop: for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) { - getPodsOutput := framework.RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns)) + getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns)) pods := strings.Fields(getPodsOutput) if numPods := len(pods); numPods != replicas { ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods)) @@ -2544,13 +2544,13 @@ waitLoop: } var runningPods []string for _, podID := range pods { - running := framework.RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns)) + running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns)) if running != "true" { framework.Logf("%s is created but not running", podID) continue waitLoop } - currentImage := framework.RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns)) + currentImage := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns)) currentImage = trimDockerRegistry(currentImage) if currentImage != containerImage { framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) @@ -2596,13 +2596,13 @@ func deleteObj(client dynamic.ResourceInterface, name string) { // createObjValidateOutputAndCleanup creates an object using the provided client // and then verifies that the kubectl get output provides custom columns. Once // the test has completed, it deletes the object. -func createObjValidateOutputAndCleanup(client dynamic.ResourceInterface, obj *unstructured.Unstructured, resource metav1.APIResource) { +func createObjValidateOutputAndCleanup(namespace string, client dynamic.ResourceInterface, obj *unstructured.Unstructured, resource metav1.APIResource) { _, err := client.Create(obj, metav1.CreateOptions{}) framework.ExpectNoError(err) defer deleteObj(client, obj.GetName()) // get test resource - output := framework.RunKubectlOrDie("get", resource.Name, "--all-namespaces") + output := framework.RunKubectlOrDie(namespace, "get", resource.Name, "--all-namespaces") if output == "" { framework.Failf("No stdout from kubectl get for %s (likely need to define test resources)", resource.Name) } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index e87f0cc3f3e..2b00ff8ec62 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -169,7 +169,7 @@ func (c *portForwardCommand) Stop() { // runPortForward runs port-forward, warning, this may need root functionality on some systems. 
func runPortForward(ns, podName string, port int) *portForwardCommand { - tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) cmd := tk.KubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port)) // This is somewhat ugly but is the only way to retrieve the port that was picked // by the port-forward command. We don't want to hard code the port as we have no diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index fe87585ce1f..a9d3fb58c20 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -87,11 +87,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { } for _, ns := range namespaces { - framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns)) + framework.RunKubectlOrDie(ns.Name, "create", "-f", backendRcYaml, getNsCmdFlag(ns)) } for _, ns := range namespaces { - framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns)) + framework.RunKubectlOrDie(ns.Name, "create", "-f", backendSvcYaml, getNsCmdFlag(ns)) } // wait for objects @@ -139,7 +139,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // create a pod in each namespace for _, ns := range namespaces { - framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie() + framework.NewKubectlCommand(ns.Name, "create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie(ns.Name) } // wait until the pods have been scheduler, i.e. are not Pending anymore. 
Remember diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index d6d93dc700a..a20daa7961e 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -549,7 +549,7 @@ var _ = SIGDescribe("Services", func() { // Restart apiserver ginkgo.By("Restarting apiserver") - if err := framework.RestartApiserver(cs); err != nil { + if err := framework.RestartApiserver(ns, cs); err != nil { framework.Failf("error restarting apiserver: %v", err) } ginkgo.By("Waiting for apiserver to come up by polling /healthz") diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index 5d762bfa6df..f814e91bf25 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -105,7 +105,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p func restartNfsServer(serverPod *v1.Pod) { const startcmd = "/usr/sbin/rpc.nfsd 1" ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace) - framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd) + framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd) } // Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the @@ -114,7 +114,7 @@ func restartNfsServer(serverPod *v1.Pod) { func stopNfsServer(serverPod *v1.Pod) { const stopcmd = "/usr/sbin/rpc.nfsd 0" ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace) - framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd) + framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd) } // Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index 2051f5324c2..abe65122a77 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -212,9 +213,10 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) testContent := "hello" testFilePath := mountPath + "/TEST" - err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name) + err = tk.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) framework.ExpectNoError(err) - content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) + content, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) framework.ExpectNoError(err) gomega.Expect(content).To(gomega.ContainSubstring(testContent)) @@ -266,7 +268,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) err = f.WaitForPodRunning(pod.Name) framework.ExpectNoError(err, "Error waiting for pod to run %v", pod) - content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath) + content, err = tk.ReadFileViaContainer(pod.Name, "test-container", testFilePath) framework.ExpectNoError(err, "Error reading file via container") 
gomega.Expect(content).NotTo(gomega.ContainSubstring(testContent)) } diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 4cbf30e5028..3216d0fea8e 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -66,6 +66,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/deployment:go_default_library", + "//test/e2e/framework/kubectl:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 88040aaa779..df973f04b75 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -40,6 +40,7 @@ import ( clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -182,7 +183,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() { containerName = "mycontainer" testFile = "/testpd1/tracker" testFileContents = fmt.Sprintf("%v", rand.Int()) - framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) + framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) ginkgo.By("verifying PD is present in node0's VolumeInUse list") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */)) @@ -205,7 +207,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { framework.Logf("deleted host0Pod %q", host0Pod.Name) } else { ginkgo.By("verifying PD contents in host1Pod") - verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents}) + verifyPDContentsViaContainer(ns, f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents}) framework.Logf("verified PD contents in pod %q", host1Pod.Name) ginkgo.By("verifying PD is removed from node0") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */)) @@ -289,7 +291,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() { testFile := fmt.Sprintf("/testpd%d/tracker%d", x, i) testFileContents := fmt.Sprintf("%v", rand.Int()) fileAndContentToVerify[testFile] = testFileContents - framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) + framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name) } @@ -297,7 +300,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { if numContainers > 1 { 
containerName = fmt.Sprintf("mycontainer%v", rand.Intn(numContainers)+1) } - verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) + verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify) ginkgo.By("deleting host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod") @@ -383,7 +386,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ginkgo.By("writing content to host0Pod") testFile := "/testpd1/tracker" testFileContents := fmt.Sprintf("%v", rand.Int()) - framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) + framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) ginkgo.By("verifying PD is present in node0's VolumeInUse list") @@ -453,10 +457,11 @@ func countReadyNodes(c clientset.Interface, hostName types.NodeName) int { return len(nodes.Items) } -func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) { +func verifyPDContentsViaContainer(namespace string, f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) { for filePath, expectedContents := range fileAndContentToVerify { // No retry loop as there should not be temporal based failures - v, err := f.ReadFileViaContainer(podName, containerName, filePath) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace) + v, err := tk.ReadFileViaContainer(podName, containerName, filePath) framework.ExpectNoError(err, "Error reading file %s via container %s", filePath, containerName) framework.Logf("Read file %q with content: %v", filePath, v) if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) { diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 6c17ec48fa4..f4ad65377de 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -962,5 +962,5 @@ func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, shell = "/bin/sh" option = "-c" } - return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command) + return framework.RunKubectl(pod.Namespace, "exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command) } diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 7c721584e51..be70abc5ac9 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -364,7 +364,7 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) { for _, filePath := range filePaths { - _, err := framework.RunKubectl("exec", 
fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath) + _, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath) framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName)) } } @@ -822,7 +822,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str // writeContentToPodFile writes the given content to the specified file. func writeContentToPodFile(namespace, podName, filePath, content string) error { - _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, + _, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath)) return err } @@ -830,7 +830,7 @@ func writeContentToPodFile(namespace, podName, filePath, content string) error { // expectFileContentToMatch checks if a given file contains the specified // content, else fails. func expectFileContentToMatch(namespace, podName, filePath, content string) { - _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, + _, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath)) framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName)) } diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go index 2e3a06cb5e9..6d1c04a8942 100644 --- a/test/e2e/upgrades/cassandra.go +++ b/test/e2e/upgrades/cassandra.go @@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool { func cassandraKubectlCreate(ns, file string) { input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file))) - framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) + framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) } // Setup creates a Cassandra StatefulSet and a PDB. It also brings up a tester diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go index 9d88c010f12..f26cde0fdec 100644 --- a/test/e2e/upgrades/etcd.go +++ b/test/e2e/upgrades/etcd.go @@ -59,7 +59,7 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool { func kubectlCreate(ns, file string) { input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file))) - framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) + framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) } // Setup creates etcd statefulset and then verifies that the etcd is writable. 
diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go index 99d466d3b9e..42ca334838f 100644 --- a/test/e2e/upgrades/mysql.go +++ b/test/e2e/upgrades/mysql.go @@ -61,7 +61,7 @@ func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool { func mysqlKubectlCreate(ns, file string) { input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file))) - framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) + framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) } func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index 11cccd2d086..fb1c90d2b93 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -109,7 +109,7 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Full [Slow]", } ginkgo.By("creating the GMSA custom resource") - customResourceCleanup, err := createGmsaCustomResource(crdManifestContents) + customResourceCleanup, err := createGmsaCustomResource(f.Namespace.Name, crdManifestContents) defer customResourceCleanup() if err != nil { framework.Failf(err.Error()) @@ -235,9 +235,9 @@ func deployGmsaWebhook(f *framework.Framework, deployScriptPath string) (func(), // regardless of whether the deployment succeeded, let's do a best effort at cleanup cleanUpFunc = func() { - framework.RunKubectl("delete", "--filename", manifestsFile) - framework.RunKubectl("delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io") - framework.RunKubectl("delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", name, namespace)) + framework.RunKubectl(f.Namespace.Name, "delete", "--filename", manifestsFile) + framework.RunKubectl(f.Namespace.Name, "delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io") + framework.RunKubectl(f.Namespace.Name, "delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", name, namespace)) os.RemoveAll(tempDir) } @@ -262,7 +262,7 @@ func deployGmsaWebhook(f *framework.Framework, deployScriptPath string) (func(), // of the manifest file retrieved from the worker node. // It returns a function to clean up both the temp file it creates and // the API object it creates when done with testing. 
-func createGmsaCustomResource(crdManifestContents string) (func(), error) { +func createGmsaCustomResource(ns string, crdManifestContents string) (func(), error) { cleanUpFunc := func() {} tempFile, err := ioutil.TempFile("", "") @@ -272,7 +272,7 @@ func createGmsaCustomResource(crdManifestContents string) (func(), error) { defer tempFile.Close() cleanUpFunc = func() { - framework.RunKubectl("delete", "--filename", tempFile.Name()) + framework.RunKubectl(ns, "delete", "--filename", tempFile.Name()) os.Remove(tempFile.Name()) } @@ -282,7 +282,7 @@ func createGmsaCustomResource(crdManifestContents string) (func(), error) { return cleanUpFunc, err } - output, err := framework.RunKubectl("apply", "--filename", tempFile.Name()) + output, err := framework.RunKubectl(ns, "apply", "--filename", tempFile.Name()) if err != nil { err = errors.Wrapf(err, "unable to create custom resource, output:\n%s", output) } @@ -392,5 +392,5 @@ func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string func runKubectlExecInNamespace(namespace string, args ...string) (string, error) { namespaceOption := fmt.Sprintf("--namespace=%s", namespace) - return framework.RunKubectl(append([]string{"exec", namespaceOption}, args...)...) + return framework.RunKubectl(namespace, append([]string{"exec", namespaceOption}, args...)...) } diff --git a/test/e2e/windows/gmsa_kubelet.go b/test/e2e/windows/gmsa_kubelet.go index 04587beb18e..a8c004e6101 100644 --- a/test/e2e/windows/gmsa_kubelet.go +++ b/test/e2e/windows/gmsa_kubelet.go @@ -98,7 +98,7 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Kubelet [Slow] // note that the "eventually" part seems to be needed to account for the fact that powershell containers // are a bit slow to become responsive, even when docker reports them as running. 
gomega.Eventually(func() bool { - output, err = framework.RunKubectl("exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") + output, err = framework.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") return err == nil }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go index b52e6f53679..2fcc3864926 100644 --- a/test/e2e/windows/memory_limits.go +++ b/test/e2e/windows/memory_limits.go @@ -195,7 +195,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory { nodeName := nodeList.Items[0].ObjectMeta.Name - kubeletConfig, err := getCurrentKubeletConfig(nodeName) + kubeletConfig, err := getCurrentKubeletConfig(nodeName, f.Namespace.Name) framework.ExpectNoError(err) systemReserve, err := resource.ParseQuantity(kubeletConfig.SystemReserved["memory"]) @@ -250,9 +250,9 @@ func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity { } // getCurrentKubeletConfig modified from test/e2e_node/util.go -func getCurrentKubeletConfig(nodeName string) (*kubeletconfig.KubeletConfiguration, error) { +func getCurrentKubeletConfig(nodeName, namespace string) (*kubeletconfig.KubeletConfiguration, error) { - resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName) + resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName, namespace) kubeCfg, err := decodeConfigz(resp) if err != nil { return nil, err @@ -261,10 +261,10 @@ func getCurrentKubeletConfig(nodeName string) (*kubeletconfig.KubeletConfigurati } // Causes the test to fail, or returns a status 200 response from the /configz endpoint -func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName string) *http.Response { +func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName, namespace string) *http.Response { // start local proxy, so we can send graceful deletion over query string, rather than body parameter ginkgo.By("Opening proxy to cluster") - tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath) + tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace) cmd := tk.KubectlCmd("proxy", "-p", "0") stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) framework.ExpectNoError(err)
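
Note for reviewers (not part of the patch): the net effect at call sites is that every kubectl helper now takes the target namespace as its first argument in addition to any `--namespace` flag already in the args, and file reads/writes through a container go through the `TestKubeconfig` helper in `test/e2e/framework/kubectl` instead of the `Framework` object. The following is a minimal, illustrative sketch of the updated calling convention only; the pod name, container name, and file path are placeholders, and the helper `demoUpdatedKubectlHelpers` is hypothetical.

```go
package example

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// demoUpdatedKubectlHelpers shows the post-patch signatures: the namespace
// is always the first argument to the Run/NewKubectlCommand helpers, and
// ExecOrDie now takes it as well.
func demoUpdatedKubectlHelpers(f *framework.Framework, podName string) error {
	ns := f.Namespace.Name
	nsFlag := fmt.Sprintf("--namespace=%v", ns)

	// Namespace-first variants of the run helpers.
	podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, nsFlag, "-o", "json")
	framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-", nsFlag)

	// NewKubectlCommand takes the namespace up front; ExecOrDie needs it too.
	out := framework.NewKubectlCommand(ns, nsFlag, "get", "pods").ExecOrDie(ns)
	framework.Logf("pods:\n%s", out)

	// File I/O via a container moved from framework.Framework to the
	// TestKubeconfig type; NewTestKubeconfig gained a namespace parameter.
	tk := e2ekubectl.NewTestKubeconfig(
		framework.TestContext.CertDir,
		framework.TestContext.Host,
		framework.TestContext.KubeConfig,
		framework.TestContext.KubeContext,
		framework.TestContext.KubectlPath,
		ns,
	)
	if err := tk.WriteFileViaContainer(podName, "mycontainer", "/tmp/tracker", "hello"); err != nil {
		return err
	}
	content, err := tk.ReadFileViaContainer(podName, "mycontainer", "/tmp/tracker")
	if err != nil {
		return err
	}
	framework.Logf("read back: %q", content)
	return nil
}
```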