diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 1fc9e7d0e1b..cef99fa6234 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -631,7 +631,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.Logf("Error pulling logs: %v", err) return false, nil } - tokenCount, err := parseInClusterClientLogs(logs) + tokenCount, err := ParseInClusterClientLogs(logs) if err != nil { return false, fmt.Errorf("inclusterclient reported an error: %v", err) } @@ -832,7 +832,8 @@ var _ = SIGDescribe("ServiceAccounts", func() { var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$") -func parseInClusterClientLogs(logs string) (int, error) { +// ParseInClusterClientLogs parses logs of pods using inclusterclient. +func ParseInClusterClientLogs(logs string) (int, error) { seenTokens := map[string]struct{}{} lines := strings.Split(logs, "\n") diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 8e912e30032..f232a5c363f 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -89,11 +89,15 @@ var kubeProxyDowngradeTests = []upgrades.Test{ &upgrades.ServiceUpgradeTest{}, } +var serviceaccountAdmissionControllerMigrationTests = []upgrades.Test{ + &upgrades.ServiceAccountAdmissionControllerMigrationTest{}, +} + // masterUpgrade upgrades master node on GCE/GKE. -func masterUpgrade(f *framework.Framework, v string) error { +func masterUpgrade(f *framework.Framework, v string, extraEnvs []string) error { switch framework.TestContext.Provider { case "gce": - return masterUpgradeGCE(v, false) + return masterUpgradeGCE(v, extraEnvs) case "gke": return framework.MasterUpgradeGKE(f.Namespace.Name, v) default: @@ -104,12 +108,11 @@ func masterUpgrade(f *framework.Framework, v string) error { // masterUpgradeGCEWithKubeProxyDaemonSet upgrades master node on GCE with enabling/disabling the daemon set of kube-proxy. 
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default. func masterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error { - return masterUpgradeGCE(v, enableKubeProxyDaemonSet) + return masterUpgradeGCE(v, []string{fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet)}) } -// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default. -func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error { - env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet)) +func masterUpgradeGCE(rawV string, extraEnvs []string) error { + env := append(os.Environ(), extraEnvs...) // TODO: Remove these variables when they're no longer needed for downgrades. if framework.TestContext.EtcdUpgradeVersion != "" && framework.TestContext.EtcdUpgradeStorage != "" { env = append(env, @@ -149,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, masterUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) @@ -190,7 +193,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, clusterUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) @@ -223,7 +226,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() { target := 
upgCtx.Versions[1].Version.String() framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) @@ -271,7 +274,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, gpuUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) @@ -289,7 +292,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { start := time.Now() defer finalizeUpgradeTest(start, gpuUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) @@ -311,7 +314,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { target := upgCtx.Versions[1].Version.String() framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) } runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) @@ -337,7 +340,7 @@ var _ = 
ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", start := time.Now() defer finalizeUpgradeTest(start, statefulUpgradeTest) target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(masterUpgrade(f, target)) + framework.ExpectNoError(masterUpgrade(f, target, nil)) framework.ExpectNoError(checkMasterVersion(f.ClientSet, target)) framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) @@ -410,6 +413,33 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]" }) }) +var _ = SIGDescribe("[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume]", func() { + f := framework.NewDefaultFramework("serviceaccount-admission-controller-migration") + + testFrameworks := createUpgradeFrameworks(serviceaccountAdmissionControllerMigrationTests) + ginkgo.Describe("master upgrade", func() { + ginkgo.It("should maintain a functioning cluster", func() { + upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget) + framework.ExpectNoError(err) + + testSuite := &junit.TestSuite{Name: "ServiceAccount admission controller migration"} + serviceaccountAdmissionControllerMigrationTest := &junit.TestCase{ + Name: "[sig-auth] serviceaccount-admission-controller-migration", + Classname: "upgrade_tests", + } + testSuite.TestCases = append(testSuite.TestCases, serviceaccountAdmissionControllerMigrationTest) + + upgradeFunc := func() { + start := time.Now() + defer finalizeUpgradeTest(start, serviceaccountAdmissionControllerMigrationTest) + target := upgCtx.Versions[1].Version.String() + framework.ExpectNoError(masterUpgrade(f, target, []string{"KUBE_FEATURE_GATES=BoundServiceAccountTokenVolume=true"})) + } + runUpgradeSuite(f, serviceaccountAdmissionControllerMigrationTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) + }) + }) +}) + type chaosMonkeyAdapter struct { test upgrades.Test 
testReport *junit.TestCase diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index 84a69cb87fd..6388165ec66 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -531,25 +531,33 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim // GetPodLogs returns the logs of the specified container (namespace/pod/container). func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, false) + return getPodLogsInternal(c, namespace, podName, containerName, false, nil) +} + +// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp. +func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) { + sinceTime := metav1.NewTime(since) + return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime) } // GetPreviousPodLogs returns the logs of the previous instance of the // specified container (namespace/pod/container). func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, true) + return getPodLogsInternal(c, namespace, podName, containerName, true, nil) } // utility function for gomega Eventually -func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { - logs, err := c.CoreV1().RESTClient().Get(). +func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) { + request := c.CoreV1().RESTClient().Get(). Resource("pods"). Namespace(namespace). Name(podName).SubResource("log"). Param("container", containerName). - Param("previous", strconv.FormatBool(previous)). - Do(context.TODO()). 
- Raw() + Param("previous", strconv.FormatBool(previous)) + if sinceTime != nil { + request.Param("sinceTime", sinceTime.Format(time.RFC3339)) + } + logs, err := request.Do(context.TODO()).Raw() if err != nil { return "", err } diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD index 3bece0cc2e4..72ad542dc15 100644 --- a/test/e2e/upgrades/BUILD +++ b/test/e2e/upgrades/BUILD @@ -17,6 +17,7 @@ go_library( "mysql.go", "nvidia-gpu.go", "secrets.go", + "serviceaccount_admission_controller_migration.go", "services.go", "sysctl.go", "upgrade.go", @@ -34,11 +35,13 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/auth:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/autoscaling:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/kubectl:go_default_library", "//test/e2e/framework/node:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/security:go_default_library", "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/skipper:go_default_library", diff --git a/test/e2e/upgrades/serviceaccount_admission_controller_migration.go b/test/e2e/upgrades/serviceaccount_admission_controller_migration.go new file mode 100644 index 00000000000..f53fb349da7 --- /dev/null +++ b/test/e2e/upgrades/serviceaccount_admission_controller_migration.go @@ -0,0 +1,140 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrades

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	e2eauth "k8s.io/kubernetes/test/e2e/auth"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	// podBeforeMigrationName is the pod created in Setup, before the master
	// upgrade; it carries a legacy service account token.
	podBeforeMigrationName = "pod-before-migration"
	// podAfterMigrationName is the pod created in Test, after the master
	// upgrade; Test checks it received a projected token volume.
	podAfterMigrationName = "pod-after-migration"
)

// ServiceAccountAdmissionControllerMigrationTest tests that a pod is functioning before and after
// a cluster upgrade.
type ServiceAccountAdmissionControllerMigrationTest struct {
	// pod is the pod created by Setup; Test re-fetches it after the upgrade
	// and compares UIDs to verify it was not recreated.
	pod *v1.Pod
}

// Name returns the tracking name of the test.
func (ServiceAccountAdmissionControllerMigrationTest) Name() string {
	return "[sig-auth] serviceaccount-admission-controller-migration"
}

// Setup creates pod-before-migration which has legacy service account token.
func (t *ServiceAccountAdmissionControllerMigrationTest) Setup(f *framework.Framework) {
	t.pod = createPod(f, podBeforeMigrationName)
	inClusterClientMustWork(f, t.pod)
}

// Test waits for the upgrade to complete, and then verifies pod-before-migration
// and pod-after-migration are able to make requests using in cluster config.
func (t *ServiceAccountAdmissionControllerMigrationTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	ginkgo.By("Waiting for upgrade to finish")
	<-done

	ginkgo.By("Starting post-upgrade check")
	ginkgo.By("Checking pod-before-migration makes successful requests using in cluster config")
	podBeforeMigration, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podBeforeMigrationName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	// The UID must match the pod created in Setup: a different UID would mean
	// the pod was deleted and recreated during the upgrade, invalidating the
	// "pre-migration pod still works" check.
	if podBeforeMigration.GetUID() != t.pod.GetUID() {
		framework.Failf("Pod %q GetUID() = %q, want %q.", podBeforeMigration.Name, podBeforeMigration.GetUID(), t.pod.GetUID())
	}
	// A restart would let the container pick up fresh credentials, so require
	// the original container instance to still be running.
	if podBeforeMigration.Status.ContainerStatuses[0].RestartCount != 0 {
		framework.Failf("Pod %q RestartCount = %d, want 0.", podBeforeMigration.Name, podBeforeMigration.Status.ContainerStatuses[0].RestartCount)
	}
	inClusterClientMustWork(f, podBeforeMigration)

	ginkgo.By("Checking pod-after-migration makes successful requests using in cluster config")
	podAfterMigration := createPod(f, podAfterMigrationName)
	// After migration, the admission controller is expected to inject exactly
	// one projected (bound token) volume — NOTE(review): assumes a single
	// service-account volume in the spec; confirm against admission behavior.
	if len(podAfterMigration.Spec.Volumes) != 1 || podAfterMigration.Spec.Volumes[0].Projected == nil {
		framework.Failf("Pod %q Volumes[0].Projected.Sources = nil, want non-nil.", podAfterMigration.Name)
	}
	inClusterClientMustWork(f, podAfterMigration)

	ginkgo.By("Finishing post-upgrade check")
}

// Teardown cleans up any remaining resources.
+func (t *ServiceAccountAdmissionControllerMigrationTest) Teardown(f *framework.Framework) { + // rely on the namespace deletion to clean up everything +} + +func inClusterClientMustWork(f *framework.Framework, pod *v1.Pod) { + var logs string + since := time.Now() + if err := wait.PollImmediate(15*time.Second, 5*time.Minute, func() (done bool, err error) { + framework.Logf("Polling logs") + logs, err = e2epod.GetPodLogsSince(f.ClientSet, pod.Namespace, pod.Name, "inclusterclient", since) + if err != nil { + framework.Logf("Error pulling logs: %v", err) + return false, nil + } + numTokens, err := e2eauth.ParseInClusterClientLogs(logs) + if err != nil { + framework.Logf("Error parsing inclusterclient logs: %v", err) + return false, fmt.Errorf("inclusterclient reported an error: %v", err) + } + if numTokens == 0 { + framework.Logf("No authenticated API calls found") + return false, nil + } + return true, nil + }); err != nil { + framework.Failf("Unexpected error: %v\n%s", err, logs) + } +} + +// createPod creates a pod. +func createPod(f *framework.Framework, podName string) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: f.Namespace.Name, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Name: "inclusterclient", + Image: imageutils.GetE2EImage(imageutils.Agnhost), + Args: []string{"inclusterclient", "--poll-interval=5"}, + }}, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + framework.ExpectNoError(err) + framework.Logf("Created pod %s", podName) + + if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { + framework.Failf("Pod %q/%q never became ready", createdPod.Namespace, createdPod.Name) + } + + return createdPod +}