commit b85adbf1fd
parent 4b99e5305e

    moved WriteFileViaContainer and ReadFileViaContainer to kubectl_utils
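This commit threads the per-test namespace through the e2e kubectl helpers: framework.RunKubectl and its Input/OrDie variants now take the target namespace as their first argument, and WriteFileViaContainer/ReadFileViaContainer move off *framework.Framework onto kubectl.TestKubeconfig, which now carries the namespace. A minimal sketch of the new call pattern, with signatures taken from the diff below (the pod and container names are hypothetical):

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    )

    func example(f *framework.Framework) error {
        // The namespace is now always the first argument to RunKubectl and friends.
        if _, err := framework.RunKubectl(f.Namespace.Name, "get", "pods"); err != nil {
            return err
        }

        // The file helpers hang off TestKubeconfig, which is constructed with the
        // namespace instead of reading it from the Framework receiver.
        tk := e2ekubectl.NewTestKubeconfig(
            framework.TestContext.CertDir,
            framework.TestContext.Host,
            framework.TestContext.KubeConfig,
            framework.TestContext.KubeContext,
            framework.TestContext.KubectlPath,
            f.Namespace.Name,
        )
        _, err := tk.ReadFileViaContainer("test-pod", "test-container", "/etc/hostname")
        return err
    }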
@@ -72,55 +72,55 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 		ginkgo.By("client-side validation (kubectl create and apply) allows request with known and required properties")
 		validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
-		if _, err := framework.RunKubectlInput(validCR, ns, "create", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil {
 			framework.Failf("failed to create valid CR %s: %v", validCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
 			framework.Failf("failed to delete valid CR: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(validCR, ns, "apply", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil {
 			framework.Failf("failed to apply valid CR %s: %v", validCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
 			framework.Failf("failed to delete valid CR: %v", err)
 		}

 		ginkgo.By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
 		unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
-		if _, err := framework.RunKubectlInput(unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
 			framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
 			framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
 		}

 		ginkgo.By("client-side validation (kubectl create and apply) rejects request without required properties")
 		noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
-		if _, err := framework.RunKubectlInput(noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
 			framework.Failf("unexpected no error when creating CR without required field: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
 			framework.Failf("unexpected no error when applying CR without required field: %v", err)
 		}

 		ginkgo.By("kubectl explain works to explain CR properties")
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*<string>.*APIVersion defines.*spec.*<Object>.*Specification of Foo`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*<string>.*APIVersion defines.*spec.*<Object>.*Specification of Foo`); err != nil {
 			framework.Failf("%v", err)
 		}

 		ginkgo.By("kubectl explain works to explain CR properties recursively")
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*<string>.*CreationTimestamp is a timestamp`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*<string>.*CreationTimestamp is a timestamp`); err != nil {
 			framework.Failf("%v", err)
 		}
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil {
 			framework.Failf("%v", err)
 		}
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*<string>.*Name of Bar`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*<string>.*Name of Bar`); err != nil {
 			framework.Failf("%v", err)
 		}

 		ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
-		if _, err := framework.RunKubectl("explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
+		if _, err := framework.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
 			framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
 		}
@@ -147,21 +147,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 		ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 		randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
-		if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 			framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 			framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}

 		ginkgo.By("kubectl explain works to explain CR without validation schema")
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*<empty>`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*<empty>`); err != nil {
 			framework.Failf("%v", err)
 		}
@@ -188,21 +188,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 		ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 		randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
-		if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 			framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 			framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}

 		ginkgo.By("kubectl explain works to explain CR")
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, fmt.Sprintf(`(?s)KIND:.*%s`, crd.Crd.Spec.Names.Kind)); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, fmt.Sprintf(`(?s)KIND:.*%s`, crd.Crd.Spec.Names.Kind)); err != nil {
 			framework.Failf("%v", err)
 		}
@@ -230,21 +230,21 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 		ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 		randomCR := fmt.Sprintf(`{%s,"spec":{"b":[{"c":"d"}]}}`, meta)
-		if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 			framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}
-		if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
+		if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 			framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 		}
-		if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+		if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 			framework.Failf("failed to delete random CR: %v", err)
 		}

 		ginkgo.By("kubectl explain works to explain CR")
-		if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*preserve-unknown-properties in nested field for Testing`); err != nil {
+		if err := verifyKubectlExplain(f.Namespace.Name, crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*preserve-unknown-properties in nested field for Testing`); err != nil {
 			framework.Failf("%v", err)
 		}
@@ -663,8 +663,8 @@ func dropDefaults(s *spec.Schema) {
 	delete(s.Extensions, "x-kubernetes-group-version-kind")
 }

-func verifyKubectlExplain(name, pattern string) error {
-	result, err := framework.RunKubectl("explain", name)
+func verifyKubectlExplain(ns, name, pattern string) error {
+	result, err := framework.RunKubectl(ns, "explain", name)
 	if err != nil {
 		return fmt.Errorf("failed to explain %s: %v", name, err)
 	}
@@ -1198,7 +1198,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
 	ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
 	timer := time.NewTimer(30 * time.Second)
 	defer timer.Stop()
-	_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
+	_, err = framework.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
 	framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook")
 	if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
 		framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
@@ -848,10 +848,10 @@ var _ = SIGDescribe("StatefulSet", func() {
 	})
 })

-func kubectlExecWithRetries(args ...string) (out string) {
+func kubectlExecWithRetries(ns string, args ...string) (out string) {
 	var err error
 	for i := 0; i < 3; i++ {
-		if out, err = framework.RunKubectl(args...); err == nil {
+		if out, err = framework.RunKubectl(ns, args...); err == nil {
 			return
 		}
 		framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
@@ -916,7 +916,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
 	ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace)
 	for k, v := range kv {
 		cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
-		framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
+		framework.Logf(framework.RunKubectlOrDie(z.ss.Namespace, "exec", ns, name, "--", "/bin/sh", "-c", cmd))
 	}
 }
@@ -924,7 +924,7 @@ func (z *zookeeperTester) read(statefulPodIndex int, key string) string {
 	name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
 	ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace)
 	cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key)
-	return lastLine(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
+	return lastLine(framework.RunKubectlOrDie(z.ss.Namespace, "exec", ns, name, "--", "/bin/sh", "-c", cmd))
 }

 type mysqlGaleraTester struct {
@@ -941,7 +941,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
 	// TODO: Find a readiness probe for mysql that guarantees writes will
 	// succeed and ditch retries. Current probe only reads, so there's a window
 	// for a race.
-	return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
+	return kubectlExecWithRetries(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
 }

 func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
@@ -981,7 +981,7 @@ func (m *redisTester) name() string {

 func (m *redisTester) redisExec(cmd, ns, podName string) string {
 	cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd)
-	return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
+	return framework.RunKubectlOrDie(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
 }

 func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
@@ -1012,7 +1012,7 @@ func (c *cockroachDBTester) name() string {

 func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
 	cmd = fmt.Sprintf("/cockroach/cockroach sql --insecure --host %s.cockroachdb -e \"%v\"", podName, cmd)
-	return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
+	return framework.RunKubectlOrDie(ns, fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
 }

 func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
@@ -1,9 +1,4 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library")

 go_library(
     name = "go_default_library",
@@ -19,6 +14,7 @@ go_library(
         "service_accounts.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/auth",
+    visibility = ["//visibility:public"],
     deps = [
         "//pkg/master/ports:go_default_library",
         "//pkg/security/apparmor:go_default_library",
@@ -55,6 +51,7 @@ go_library(
         "//test/e2e/framework/auth:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/utils:go_default_library",
@@ -77,4 +74,5 @@ filegroup(
     name = "all-srcs",
     srcs = [":package-srcs"],
     tags = ["automanaged"],
+    visibility = ["//visibility:public"],
 )
@@ -32,6 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -224,11 +225,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))

-		mountedToken, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey))
+		tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name)
+		mountedToken, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey))
 		framework.ExpectNoError(err)
-		mountedCA, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey))
+		mountedCA, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey))
 		framework.ExpectNoError(err)
-		mountedNamespace, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey))
+		mountedNamespace, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey))
 		framework.ExpectNoError(err)

 		// CA and namespace should be identical
@@ -220,7 +220,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		addGpuNodePool(gpuPoolName, gpuType, 1, 0)
 		defer deleteNodePool(gpuPoolName)

-		installNvidiaDriversDaemonSet()
+		installNvidiaDriversDaemonSet(f.Namespace.Name)

 		ginkgo.By("Enable autoscaler")
 		framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
@@ -247,7 +247,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		addGpuNodePool(gpuPoolName, gpuType, 1, 1)
 		defer deleteNodePool(gpuPoolName)

-		installNvidiaDriversDaemonSet()
+		installNvidiaDriversDaemonSet(f.Namespace.Name)

 		ginkgo.By("Schedule a single pod which requires GPU")
 		framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
@@ -277,7 +277,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		addGpuNodePool(gpuPoolName, gpuType, 1, 0)
 		defer deleteNodePool(gpuPoolName)

-		installNvidiaDriversDaemonSet()
+		installNvidiaDriversDaemonSet(f.Namespace.Name)

 		ginkgo.By("Enable autoscaler")
 		framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
@@ -306,7 +306,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		addGpuNodePool(gpuPoolName, gpuType, 1, 1)
 		defer deleteNodePool(gpuPoolName)

-		installNvidiaDriversDaemonSet()
+		installNvidiaDriversDaemonSet(f.Namespace.Name)

 		ginkgo.By("Schedule a single pod which requires GPU")
 		framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
@@ -593,7 +593,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		if len(newNodesSet) > 1 {
 			ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
 			klog.Infof("Usually only 1 new node is expected, investigating")
-			klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
+			klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
 			if output, err := exec.Command("gcloud", "compute", "instances", "list",
 				"--project="+framework.TestContext.CloudConfig.ProjectID,
 				"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
@@ -997,10 +997,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 })

-func installNvidiaDriversDaemonSet() {
+func installNvidiaDriversDaemonSet(namespace string) {
 	ginkgo.By("Add daemonset which installs nvidia drivers")
 	// the link differs from one in GKE documentation; discussed with @mindprince this one should be used
-	framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
+	framework.RunKubectlOrDie(namespace, "apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
 }

 func execCmd(args ...string) *exec.Cmd {
@@ -1400,8 +1400,8 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface
 		klog.Infof("Too many pods are not ready yet: %v", notready)
 	}
 	klog.Info("Timeout on waiting for pods being ready")
-	klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
-	klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
+	klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
+	klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))

 	// Some pods are still not running.
 	return fmt.Errorf("Too many pods are still not running: %v", notready)
@@ -258,11 +258,11 @@ func (tc *CustomMetricTestCase) Run() {
 	}
 	defer monitoring.CleanupDescriptors(gcmService, projectID)

-	err = monitoring.CreateAdapter(monitoring.AdapterDefault)
+	err = monitoring.CreateAdapter(tc.framework.Namespace.ObjectMeta.Name, monitoring.AdapterDefault)
 	if err != nil {
 		framework.Failf("Failed to set up: %v", err)
 	}
-	defer monitoring.CleanupAdapter(monitoring.AdapterDefault)
+	defer monitoring.CleanupAdapter(tc.framework.Namespace.ObjectMeta.Name, monitoring.AdapterDefault)

 	// Run application that exports the metric
 	err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 			start := time.Now()
 			defer finalizeUpgradeTest(start, masterUpgradeTest)
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 		}
 		runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
@@ -144,7 +144,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 			start := time.Now()
 			defer finalizeUpgradeTest(start, clusterUpgradeTest)
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 			framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
 			framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
@@ -177,7 +177,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
 			target := upgCtx.Versions[1].Version.String()
 			framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
 			framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 		}
 		runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -225,7 +225,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			start := time.Now()
 			defer finalizeUpgradeTest(start, gpuUpgradeTest)
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 		}
 		runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
@@ -243,7 +243,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			start := time.Now()
 			defer finalizeUpgradeTest(start, gpuUpgradeTest)
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 			framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
 			framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
@@ -265,7 +265,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			target := upgCtx.Versions[1].Version.String()
 			framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
 			framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 		}
 		runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -291,7 +291,7 @@ var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]",
 			start := time.Now()
 			defer finalizeUpgradeTest(start, statefulUpgradeTest)
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.MasterUpgrade(target))
+			framework.ExpectNoError(framework.MasterUpgrade(f, target))
 			framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
 			framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
 			framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
@@ -69,8 +69,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			httpYaml := readFile(test, "http-liveness.yaml.in")
 			nsFlag := fmt.Sprintf("--namespace=%v", ns)

-			framework.RunKubectlOrDieInput(execYaml, "create", "-f", "-", nsFlag)
-			framework.RunKubectlOrDieInput(httpYaml, "create", "-f", "-", nsFlag)
+			framework.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-", nsFlag)
+			framework.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-", nsFlag)

 			// Since both containers start rapidly, we can easily run this test in parallel.
 			var wg sync.WaitGroup
@@ -120,8 +120,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			podName := "secret-test-pod"

 			ginkgo.By("creating secret and pod")
-			framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag)
-			framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
+			framework.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-", nsFlag)
+			framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag)
 			err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
 			framework.ExpectNoError(err)
@@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			podName := "dapi-test-pod"

 			ginkgo.By("creating the pod")
-			framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
+			framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag)
 			err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
 			framework.ExpectNoError(err)
@@ -22,7 +22,6 @@ limitations under the License.
 package framework

 import (
-	"bytes"
 	"fmt"
 	"io/ioutil"
 	"math/rand"
@@ -52,14 +51,12 @@ import (
 	"github.com/onsi/gomega"

 	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
-	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 const (
-	maxKubectlExecRetries = 5
 	// DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion.
 	DefaultNamespaceDeletionTimeout = 5 * time.Minute
 )
@@ -502,35 +499,6 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
 	f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
 }

-// WriteFileViaContainer writes a file using kubectl exec echo <contents> > <path> via specified container
-// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
-func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
-	ginkgo.By("writing a file in the container")
-	allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-	for _, c := range contents {
-		if !strings.ContainsRune(allowedCharacters, c) {
-			return fmt.Errorf("Unsupported character in string to write: %v", c)
-		}
-	}
-	command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
-	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
-	if err != nil {
-		Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
-	}
-	return err
-}
-
-// ReadFileViaContainer reads a file using kubectl exec cat <path>.
-func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
-	ginkgo.By("reading a file in the container")
-
-	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
-	if err != nil {
-		Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
-	}
-	return string(stdout), err
-}
-
 // CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
 func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) {
 	var err error
@@ -655,52 +623,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 	return nil
 }

-func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
-	for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
-		if numRetries > 0 {
-			Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
-		}
-
-		stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
-		if err != nil {
-			if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
-				// Retry on "i/o timeout" errors
-				Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
-				continue
-			}
-			if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
-				// Retry on "container not found" errors
-				Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
-				time.Sleep(2 * time.Second)
-				continue
-			}
-		}
-
-		return stdOutBytes, stdErrBytes, err
-	}
-	err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries)
-	return nil, nil, err
-}
-
-func kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
-	var stdout, stderr bytes.Buffer
-	cmdArgs := []string{
-		"exec",
-		fmt.Sprintf("--namespace=%v", namespace),
-		podName,
-		fmt.Sprintf("-c=%v", containerName),
-	}
-	cmdArgs = append(cmdArgs, args...)
-
-	tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath)
-	cmd := tk.KubectlCmd(cmdArgs...)
-	cmd.Stdout, cmd.Stderr = &stdout, &stderr
-
-	Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
-	err := cmd.Run()
-	return stdout.Bytes(), stderr.Bytes(), err
-}
-
 // KubeDescribe is wrapper function for ginkgo describe. Adds namespacing.
 // TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
 func KubeDescribe(text string, body func()) bool {
@@ -446,10 +446,10 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
 	}

 	j.Logger.Infof("creating replication controller")
-	framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
+	framework.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))

 	j.Logger.Infof("creating service")
-	framework.RunKubectlOrDieInput(read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
+	framework.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
 	if len(svcAnnotations) > 0 {
 		svcList, err := j.Client.CoreV1().Services(ns).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
@@ -462,7 +462,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri

 	if exists("secret.yaml") {
 		j.Logger.Infof("creating secret")
-		framework.RunKubectlOrDieInput(read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
+		framework.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
 	}
 	j.Logger.Infof("Parsing ingress from %v", filepath.Join(manifestPath, "ing.yaml"))
@@ -904,7 +904,7 @@ func (cont *NginxIngressController) Init() {
 		return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
 	}
 	framework.Logf("initializing nginx ingress controller")
-	framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))
+	framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))

 	rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
 	framework.ExpectNoError(err)
@@ -13,6 +13,9 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/utils:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
+        "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
 )
@@ -17,10 +17,12 @@ limitations under the License.
 package kubectl

 import (
+	"bytes"
 	"fmt"
 	"os/exec"
 	"path/filepath"
+	"strings"
 	"time"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,25 +31,34 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
+	"github.com/onsi/ginkgo"
 )

-// TestKubeconfig is a struct containing the minimum attributes needed to run KubectlCmd.
+const (
+	maxKubectlExecRetries = 5
+)
+
+// TestKubeconfig is a struct containing the needed attributes from TestContext and Framework(Namespace).
 type TestKubeconfig struct {
 	CertDir     string
 	Host        string
 	KubeConfig  string
 	KubeContext string
 	KubectlPath string
+	Namespace   string // Every test has at least one namespace unless creation is skipped
 }

 // NewTestKubeconfig returns a new Kubeconfig struct instance.
-func NewTestKubeconfig(certdir string, host string, kubeconfig string, kubecontext string, kubectlpath string) *TestKubeconfig {
+func NewTestKubeconfig(certdir, host, kubeconfig, kubecontext, kubectlpath, namespace string) *TestKubeconfig {
 	return &TestKubeconfig{
 		CertDir:     certdir,
 		Host:        host,
 		KubeConfig:  kubeconfig,
 		KubeContext: kubecontext,
 		KubectlPath: kubectlpath,
+		Namespace:   namespace,
 	}
 }
@@ -116,3 +127,77 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string
 		}
 	}
 }
+
+// WriteFileViaContainer writes a file using kubectl exec echo <contents> > <path> via specified container
+// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
+func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
+	ginkgo.By("writing a file in the container")
+	allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	for _, c := range contents {
+		if !strings.ContainsRune(allowedCharacters, c) {
+			return fmt.Errorf("Unsupported character in string to write: %v", c)
+		}
+	}
+	command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
+	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
+	if err != nil {
+		e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+	}
+	return err
+}
+
+// ReadFileViaContainer reads a file using kubectl exec cat <path>.
+func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
+	ginkgo.By("reading a file in the container")
+
+	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
+	if err != nil {
+		e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+	}
+	return string(stdout), err
+}
+
+func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
+	for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
+		if numRetries > 0 {
+			e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+		}
+
+		stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
+		if err != nil {
+			if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
+				// Retry on "i/o timeout" errors
+				e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				continue
+			}
+			if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
+				// Retry on "container not found" errors
+				e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				time.Sleep(2 * time.Second)
+				continue
+			}
+		}
+
+		return stdOutBytes, stdErrBytes, err
+	}
+	err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries)
+	return nil, nil, err
+}
+
+func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
+	var stdout, stderr bytes.Buffer
+	cmdArgs := []string{
+		"exec",
+		fmt.Sprintf("--namespace=%v", namespace),
+		podName,
+		fmt.Sprintf("-c=%v", containerName),
+	}
+	cmdArgs = append(cmdArgs, args...)
+
+	cmd := tk.KubectlCmd(cmdArgs...)
+	cmd.Stdout, cmd.Stderr = &stdout, &stderr
+
+	e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+	err := cmd.Run()
+	return stdout.Bytes(), stderr.Bytes(), err
+}
@@ -185,7 +185,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
 		}
 		framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
 		desc, _ := framework.RunKubectl(
-			"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
+			e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
 		framework.Logf(desc)
 	}
 }
@@ -423,7 +423,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
 	}); pollErr != nil {
 		framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
 		desc, _ := framework.RunKubectl(
-			"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
+			config.Namespace, "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
 		framework.Logf("%s", desc)
 		framework.Failf("Timed out in %v: %v", retryTimeout, msg)
 	}
@@ -47,12 +47,12 @@ func EtcdUpgrade(targetStorage, targetVersion string) error {
 }

 // MasterUpgrade upgrades master node on GCE/GKE.
-func MasterUpgrade(v string) error {
+func MasterUpgrade(f *Framework, v string) error {
 	switch TestContext.Provider {
 	case "gce":
 		return masterUpgradeGCE(v, false)
 	case "gke":
-		return masterUpgradeGKE(v)
+		return masterUpgradeGKE(f.Namespace.Name, v)
 	case "kubernetes-anywhere":
 		return masterUpgradeKubernetesAnywhere(v)
 	default:
@@ -113,7 +113,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
 	return args
 }

-func masterUpgradeGKE(v string) error {
+func masterUpgradeGKE(namespace string, v string) error {
 	Logf("Upgrading master to %q", v)
 	args := []string{
 		"container",
@@ -131,7 +131,7 @@ func masterUpgradeGKE(v string) error {
 		return err
 	}

-	waitForSSHTunnels()
+	waitForSSHTunnels(namespace)

 	return nil
 }
@@ -181,7 +181,7 @@ func NodeUpgrade(f *Framework, v string, img string) error {
 	case "gce":
 		err = nodeUpgradeGCE(v, img, false)
 	case "gke":
-		err = nodeUpgradeGKE(v, img)
+		err = nodeUpgradeGKE(f.Namespace.Name, v, img)
 	default:
 		err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
 	}
@@ -230,7 +230,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
 	return err
 }

-func nodeUpgradeGKE(v string, img string) error {
+func nodeUpgradeGKE(namespace string, v string, img string) error {
 	Logf("Upgrading nodes to version %q and image %q", v, img)
 	nps, err := nodePoolsGKE()
 	if err != nil {
@@ -258,7 +258,7 @@ func nodeUpgradeGKE(v string, img string) error {
 			return err
 		}

-		waitForSSHTunnels()
+		waitForSSHTunnels(namespace)
 	}
 	return nil
 }
@@ -290,18 +290,18 @@ func gceUpgradeScript() string {
 	return TestContext.GCEUpgradeScript
 }

-func waitForSSHTunnels() {
+func waitForSSHTunnels(namespace string) {
 	Logf("Waiting for SSH tunnels to establish")
-	RunKubectl("run", "ssh-tunnel-test",
+	RunKubectl(namespace, "run", "ssh-tunnel-test",
 		"--image=busybox",
 		"--restart=Never",
 		"--command", "--",
 		"echo", "Hello")
-	defer RunKubectl("delete", "pod", "ssh-tunnel-test")
+	defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")

 	// allow up to a minute for new ssh tunnels to establish
 	wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
-		_, err := RunKubectl("logs", "ssh-tunnel-test")
+		_, err := RunKubectl(namespace, "logs", "ssh-tunnel-test")
 		return err == nil, nil
 	})
 }
@@ -105,7 +105,7 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv
 func DescribeSvc(ns string) {
 	framework.Logf("\nOutput of kubectl describe svc:\n")
 	desc, _ := framework.RunKubectl(
-		"describe", "svc", fmt.Sprintf("--namespace=%v", ns))
+		ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
 	framework.Logf(desc)
 }
@ -30,7 +30,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -587,7 +586,7 @@ func Cleanup(filePath, ns string, selectors ...string) {
 	if ns != "" {
 		nsArg = fmt.Sprintf("--namespace=%s", ns)
 	}
-	RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
+	RunKubectlOrDie(ns, "delete", "--grace-period=0", "-f", filePath, nsArg)
 	AssertCleanup(ns, selectors...)
 }
@@ -602,12 +601,12 @@ func AssertCleanup(ns string, selectors ...string) {
 	verifyCleanupFunc := func() (bool, error) {
 		e = nil
 		for _, selector := range selectors {
-			resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
+			resources := RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers", nsArg)
 			if resources != "" {
 				e = fmt.Errorf("Resources left running after stop:\n%s", resources)
 				return false, nil
 			}
-			pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+			pods := RunKubectlOrDie(ns, "get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
 			if pods != "" {
 				e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
 				return false, nil
@@ -629,7 +628,7 @@ func LookForStringInPodExec(ns, podName string, command []string, expectedString
 		// use the first container
 		args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
 		args = append(args, command...)
-		return RunKubectlOrDie(args...)
+		return RunKubectlOrDie(ns, args...)
 	})
 }
@@ -656,9 +655,9 @@ type KubectlBuilder struct {
 }

 // NewKubectlCommand returns a KubectlBuilder for running kubectl.
-func NewKubectlCommand(args ...string) *KubectlBuilder {
+func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
 	b := new(KubectlBuilder)
-	tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath)
+	tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
 	b.cmd = tk.KubectlCmd(args...)
 	return b
 }
@@ -688,14 +687,14 @@ func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
 }

 // ExecOrDie runs the kubectl executable or dies if error occurs.
-func (b KubectlBuilder) ExecOrDie() string {
+func (b KubectlBuilder) ExecOrDie(namespace string) string {
 	str, err := b.Exec()
 	// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
 	// Note that we're still dying after retrying so that we can get visibility to triage it further.
 	if isTimeout(err) {
 		Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
 		time.Sleep(2 * time.Second)
-		retryStr, retryErr := RunKubectl("version")
+		retryStr, retryErr := RunKubectl(namespace, "version")
 		Logf("stdout: %q", retryStr)
 		Logf("err: %v", retryErr)
 	}
@@ -754,23 +753,23 @@ func (b KubectlBuilder) Exec() (string, error) {
 }

 // RunKubectlOrDie is a convenience wrapper over kubectlBuilder
-func RunKubectlOrDie(args ...string) string {
-	return NewKubectlCommand(args...).ExecOrDie()
+func RunKubectlOrDie(namespace string, args ...string) string {
+	return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
 }

 // RunKubectl is a convenience wrapper over kubectlBuilder
-func RunKubectl(args ...string) (string, error) {
-	return NewKubectlCommand(args...).Exec()
+func RunKubectl(namespace string, args ...string) (string, error) {
+	return NewKubectlCommand(namespace, args...).Exec()
 }

 // RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
-func RunKubectlOrDieInput(data string, args ...string) string {
-	return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
+func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
+	return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
 }

 // RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
-func RunKubectlInput(data string, args ...string) (string, error) {
-	return NewKubectlCommand(args...).WithStdinData(data).Exec()
+func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
+	return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
 }

 // RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
@@ -1258,7 +1257,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
 func RunHostCmd(ns, name, cmd string) (string, error) {
-	return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd)
+	return RunKubectl(ns, "exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd)
 }

 // RunHostCmdOrDie calls RunHostCmd and dies on error.
@@ -1381,7 +1380,7 @@ func RestartKubelet(host string) error {
 }

 // RestartApiserver restarts the kube-apiserver.
-func RestartApiserver(cs clientset.Interface) error {
+func RestartApiserver(namespace string, cs clientset.Interface) error {
 	// TODO: Make it work for all providers.
 	if !ProviderIs("gce", "gke", "aws") {
 		return fmt.Errorf("unsupported provider for RestartApiserver: %s", TestContext.Provider)
@@ -1402,7 +1401,7 @@ func RestartApiserver(cs clientset.Interface) error {
 	if err != nil {
 		return err
 	}
-	return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
+	return masterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v'
 }

 func sshRestartMaster() error {
@@ -1546,7 +1545,7 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
 // LookForStringInLog looks for the given string in the log of a specific pod container
 func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
 	return LookForString(expectedString, timeout, func() string {
-		return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
+		return RunKubectlOrDie(ns, "logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
 	})
 }
@@ -1804,7 +1803,7 @@ func GetAllMasterAddresses(c clientset.Interface) []string {
 func DescribeIng(ns string) {
 	Logf("\nOutput of kubectl describe ing:\n")
 	desc, _ := RunKubectl(
-		"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
+		ns, "describe", "ing", fmt.Sprintf("--namespace=%v", ns))
 	Logf(desc)
 }
@@ -1851,7 +1850,7 @@ func (f *Framework) NewAgnhostPod(name string, args ...string) *v1.Pod {
 // CreateEmptyFileOnPod creates empty file at given path on the pod.
 // TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored.
 func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
-	_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
+	_, err := RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
 	return err
 }
@ -1859,10 +1858,10 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
|
||||
func DumpDebugInfo(c clientset.Interface, ns string) {
|
||||
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
for _, s := range sl.Items {
|
||||
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
|
||||
desc, _ := RunKubectl(ns, "describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
|
||||
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
|
||||
|
||||
l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
|
||||
l, _ := RunKubectl(ns, "logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
|
||||
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
|
||||
}
|
||||
}
|
||||
|
@ -614,7 +614,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
|
||||
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
|
||||
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
|
||||
}
|
||||
out, err := framework.RunKubectl(commands...)
|
||||
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
|
||||
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
|
||||
}
|
||||
|
||||
|
@ -251,11 +251,11 @@ func prometheusExporterPodSpec(metricName string, metricValue int64, port int32)

// CreateAdapter creates Custom Metrics - Stackdriver adapter
// adapterDeploymentFile should be a filename for adapter deployment located in StagingDeploymentLocation
func CreateAdapter(adapterDeploymentFile string) error {
func CreateAdapter(namespace, adapterDeploymentFile string) error {
// A workaround to make this work on GKE. GKE doesn't normally allow creating cluster roles,
// which the adapter deployment does. The solution is to create a cluster role binding for
// the cluster-admin role and the currently used service account.
err := createClusterAdminBinding()
err := createClusterAdminBinding(namespace)
if err != nil {
return err
}
@ -264,12 +264,12 @@ func CreateAdapter(adapterDeploymentFile string) error {
if err != nil {
return err
}
stat, err := framework.RunKubectl("create", "-f", adapterURL)
stat, err := framework.RunKubectl(namespace, "create", "-f", adapterURL)
framework.Logf(stat)
return err
}

func createClusterAdminBinding() error {
func createClusterAdminBinding(namespace string) error {
stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account")
if err != nil {
framework.Logf(stderr)
@ -277,7 +277,7 @@ func createClusterAdminBinding() error {
}
serviceAccount := strings.TrimSpace(stdout)
framework.Logf("current service account: %q", serviceAccount)
stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
stat, err := framework.RunKubectl(namespace, "create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
framework.Logf(stat)
return err
}
@ -316,8 +316,8 @@ func CleanupDescriptors(service *gcm.Service, projectID string) {
}

// CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
func CleanupAdapter(adapterDeploymentFile string) {
stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile)
func CleanupAdapter(namespace, adapterDeploymentFile string) {
stat, err := framework.RunKubectl(namespace, "delete", "-f", adapterDeploymentFile)
framework.Logf(stat)
if err != nil {
framework.Logf("Failed to delete adapter deployments: %s", err)
@ -326,11 +326,11 @@ func CleanupAdapter(adapterDeploymentFile string) {
if err != nil {
framework.Logf("Failed to delete adapter deployment file: %s", err)
}
cleanupClusterAdminBinding()
cleanupClusterAdminBinding(namespace)
}

func cleanupClusterAdminBinding() {
stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding)
func cleanupClusterAdminBinding(namespace string) {
stat, err := framework.RunKubectl(namespace, "delete", "clusterrolebinding", ClusterAdminBinding)
framework.Logf(stat)
if err != nil {
framework.Logf("Failed to delete cluster admin binding: %s", err)
@ -112,11 +112,11 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
}
defer CleanupDescriptors(gcmService, projectID)

err = CreateAdapter(adapterDeployment)
err = CreateAdapter(f.Namespace.Name, adapterDeployment)
if err != nil {
framework.Failf("Failed to set up: %s", err)
}
defer CleanupAdapter(adapterDeployment)
defer CleanupAdapter(f.Namespace.Name, adapterDeployment)

_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
if err != nil {
@ -159,11 +159,11 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
defer CleanupDescriptors(gcmService, projectID)

// Both deployments - for old and new resource model - expose External Metrics API.
err = CreateAdapter(AdapterForOldResourceModel)
err = CreateAdapter(f.Namespace.Name, AdapterForOldResourceModel)
if err != nil {
framework.Failf("Failed to set up: %s", err)
}
defer CleanupAdapter(AdapterForOldResourceModel)
defer CleanupAdapter(f.Namespace.Name, AdapterForOldResourceModel)

_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
if err != nil {
File diff suppressed because it is too large
@ -169,7 +169,7 @@ func (c *portForwardCommand) Stop() {

// runPortForward runs port-forward, warning, this may need root functionality on some systems.
func runPortForward(ns, podName string, port int) *portForwardCommand {
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
cmd := tk.KubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port))
// This is somewhat ugly but is the only way to retrieve the port that was picked
// by the port-forward command. We don't want to hard code the port as we have no
@ -87,11 +87,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
}

for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
framework.RunKubectlOrDie(ns.Name, "create", "-f", backendRcYaml, getNsCmdFlag(ns))
}

for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
framework.RunKubectlOrDie(ns.Name, "create", "-f", backendSvcYaml, getNsCmdFlag(ns))
}

// wait for objects
@ -139,7 +139,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {

// create a pod in each namespace
for _, ns := range namespaces {
framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
framework.NewKubectlCommand(ns.Name, "create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie(ns.Name)
}

// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
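
Note that both ends of the builder chain now carry the namespace: NewKubectlCommand takes it as the first argument, and ExecOrDie takes it as well (the diff only shows the signature change, so treat any use of it on the retry or diagnostic path as an assumption). A minimal sketch with a hypothetical namespace and manifest:

// Hypothetical pod manifest fed through stdin.
podYaml := `apiVersion: v1
kind: Pod
metadata:
  name: demo`
// The namespace goes to the builder and again to ExecOrDie.
framework.NewKubectlCommand("my-ns", "create", "-f", "-", "--namespace=my-ns").WithStdinData(podYaml).ExecOrDie("my-ns")
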
@ -549,7 +549,7 @@ var _ = SIGDescribe("Services", func() {

// Restart apiserver
ginkgo.By("Restarting apiserver")
if err := framework.RestartApiserver(cs); err != nil {
if err := framework.RestartApiserver(ns, cs); err != nil {
framework.Failf("error restarting apiserver: %v", err)
}
ginkgo.By("Waiting for apiserver to come up by polling /healthz")
@ -105,7 +105,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
func restartNfsServer(serverPod *v1.Pod) {
const startcmd = "/usr/sbin/rpc.nfsd 1"
ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
}

// Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
@ -114,7 +114,7 @@ func restartNfsServer(serverPod *v1.Pod) {
func stopNfsServer(serverPod *v1.Pod) {
const stopcmd = "/usr/sbin/rpc.nfsd 0"
ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
}

// Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container
@ -29,6 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -212,9 +213,10 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)

testContent := "hello"
testFilePath := mountPath + "/TEST"
err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name)
err = tk.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
framework.ExpectNoError(err)
content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
content, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
framework.ExpectNoError(err)
gomega.Expect(content).To(gomega.ContainSubstring(testContent))

@ -266,7 +268,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
err = f.WaitForPodRunning(pod.Name)
framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)

content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
content, err = tk.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
framework.ExpectNoError(err, "Error reading file via container")
gomega.Expect(content).NotTo(gomega.ContainSubstring(testContent))
}
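
This hunk shows the new home of the file helpers: WriteFileViaContainer and ReadFileViaContainer are now methods on e2ekubectl.TestKubeconfig rather than on the Framework, and NewTestKubeconfig gains a trailing namespace argument. A minimal sketch of the resulting pattern (the pod, container, and file names are hypothetical):

tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host,
	framework.TestContext.KubeConfig, framework.TestContext.KubeContext,
	framework.TestContext.KubectlPath, f.Namespace.Name)
// Write a marker file into the running container, then read it back.
err := tk.WriteFileViaContainer("test-pod", "test-container", "/tmp/marker", "hello")
framework.ExpectNoError(err)
content, err := tk.ReadFileViaContainer("test-pod", "test-container", "/tmp/marker")
framework.ExpectNoError(err)
gomega.Expect(content).To(gomega.ContainSubstring("hello"))
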
@ -66,6 +66,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
@ -40,6 +40,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@ -182,7 +183,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
containerName = "mycontainer"
testFile = "/testpd1/tracker"
testFileContents = fmt.Sprintf("%v", rand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
ginkgo.By("verifying PD is present in node0's VolumeInUse list")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
@ -205,7 +207,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.Logf("deleted host0Pod %q", host0Pod.Name)
} else {
ginkgo.By("verifying PD contents in host1Pod")
verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
verifyPDContentsViaContainer(ns, f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
framework.Logf("verified PD contents in pod %q", host1Pod.Name)
ginkgo.By("verifying PD is removed from node0")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
@ -289,7 +291,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
testFile := fmt.Sprintf("/testpd%d/tracker%d", x, i)
testFileContents := fmt.Sprintf("%v", rand.Int())
fileAndContentToVerify[testFile] = testFileContents
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
}

@ -297,7 +300,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
if numContainers > 1 {
containerName = fmt.Sprintf("mycontainer%v", rand.Intn(numContainers)+1)
}
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify)

ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
@ -383,7 +386,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("writing content to host0Pod")
testFile := "/testpd1/tracker"
testFileContents := fmt.Sprintf("%v", rand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)

ginkgo.By("verifying PD is present in node0's VolumeInUse list")
@ -453,10 +457,11 @@ func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
return len(nodes.Items)
}

func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
func verifyPDContentsViaContainer(namespace string, f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
for filePath, expectedContents := range fileAndContentToVerify {
// No retry loop as there should not be temporal based failures
v, err := f.ReadFileViaContainer(podName, containerName, filePath)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
v, err := tk.ReadFileViaContainer(podName, containerName, filePath)
framework.ExpectNoError(err, "Error reading file %s via container %s", filePath, containerName)
framework.Logf("Read file %q with content: %v", filePath, v)
if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
@ -962,5 +962,5 @@ func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string,
shell = "/bin/sh"
option = "-c"
}
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
return framework.RunKubectl(pod.Namespace, "exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
}

@ -364,7 +364,7 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st

func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
}
}
@ -822,7 +822,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str

// writeContentToPodFile writes the given content to the specified file.
func writeContentToPodFile(namespace, podName, filePath, content string) error {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
return err
}
@ -830,7 +830,7 @@ func writeContentToPodFile(namespace, podName, filePath, content string) error {
// expectFileContentToMatch checks if a given file contains the specified
// content, else fails.
func expectFileContentToMatch(namespace, podName, filePath, content string) {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
}
@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {

func cassandraKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file)))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}

// Setup creates a Cassandra StatefulSet and a PDB. It also brings up a tester

@ -59,7 +59,7 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {

func kubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file)))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}

// Setup creates etcd statefulset and then verifies that the etcd is writable.

@ -61,7 +61,7 @@ func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {

func mysqlKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file)))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}

func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
@ -109,7 +109,7 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Full [Slow]",
}

ginkgo.By("creating the GMSA custom resource")
customResourceCleanup, err := createGmsaCustomResource(crdManifestContents)
customResourceCleanup, err := createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
defer customResourceCleanup()
if err != nil {
framework.Failf(err.Error())
@ -235,9 +235,9 @@ func deployGmsaWebhook(f *framework.Framework, deployScriptPath string) (func(),

// regardless of whether the deployment succeeded, let's do a best effort at cleanup
cleanUpFunc = func() {
framework.RunKubectl("delete", "--filename", manifestsFile)
framework.RunKubectl("delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io")
framework.RunKubectl("delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", name, namespace))
framework.RunKubectl(f.Namespace.Name, "delete", "--filename", manifestsFile)
framework.RunKubectl(f.Namespace.Name, "delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io")
framework.RunKubectl(f.Namespace.Name, "delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", name, namespace))
os.RemoveAll(tempDir)
}

@ -262,7 +262,7 @@ func deployGmsaWebhook(f *framework.Framework, deployScriptPath string) (func(),
// of the manifest file retrieved from the worker node.
// It returns a function to clean up both the temp file it creates and
// the API object it creates when done with testing.
func createGmsaCustomResource(crdManifestContents string) (func(), error) {
func createGmsaCustomResource(ns string, crdManifestContents string) (func(), error) {
cleanUpFunc := func() {}

tempFile, err := ioutil.TempFile("", "")
@ -272,7 +272,7 @@ func createGmsaCustomResource(crdManifestContents string) (func(), error) {
defer tempFile.Close()

cleanUpFunc = func() {
framework.RunKubectl("delete", "--filename", tempFile.Name())
framework.RunKubectl(ns, "delete", "--filename", tempFile.Name())
os.Remove(tempFile.Name())
}

@ -282,7 +282,7 @@ func createGmsaCustomResource(crdManifestContents string) (func(), error) {
return cleanUpFunc, err
}

output, err := framework.RunKubectl("apply", "--filename", tempFile.Name())
output, err := framework.RunKubectl(ns, "apply", "--filename", tempFile.Name())
if err != nil {
err = errors.Wrapf(err, "unable to create custom resource, output:\n%s", output)
}
@ -392,5 +392,5 @@ func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string

func runKubectlExecInNamespace(namespace string, args ...string) (string, error) {
namespaceOption := fmt.Sprintf("--namespace=%s", namespace)
return framework.RunKubectl(append([]string{"exec", namespaceOption}, args...)...)
return framework.RunKubectl(namespace, append([]string{"exec", namespaceOption}, args...)...)
}

@ -98,7 +98,7 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Kubelet [Slow]
// note that the "eventually" part seems to be needed to account for the fact that powershell containers
// are a bit slow to become responsive, even when docker reports them as running.
gomega.Eventually(func() bool {
output, err = framework.RunKubectl("exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN")
output, err = framework.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN")
return err == nil
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())

@ -195,7 +195,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory {

nodeName := nodeList.Items[0].ObjectMeta.Name

kubeletConfig, err := getCurrentKubeletConfig(nodeName)
kubeletConfig, err := getCurrentKubeletConfig(nodeName, f.Namespace.Name)
framework.ExpectNoError(err)

systemReserve, err := resource.ParseQuantity(kubeletConfig.SystemReserved["memory"])
@ -250,9 +250,9 @@ func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity {
}

// getCurrentKubeletConfig modified from test/e2e_node/util.go
func getCurrentKubeletConfig(nodeName string) (*kubeletconfig.KubeletConfiguration, error) {
func getCurrentKubeletConfig(nodeName, namespace string) (*kubeletconfig.KubeletConfiguration, error) {

resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName)
resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName, namespace)
kubeCfg, err := decodeConfigz(resp)
if err != nil {
return nil, err
@ -261,10 +261,10 @@ func getCurrentKubeletConfig(nodeName string) (*kubeletconfig.KubeletConfigurati
}

// Causes the test to fail, or returns a status 200 response from the /configz endpoint
func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName string) *http.Response {
func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName, namespace string) *http.Response {
// start local proxy, so we can send graceful deletion over query string, rather than body parameter
ginkgo.By("Opening proxy to cluster")
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
cmd := tk.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
framework.ExpectNoError(err)