Merge pull request #51838 from MrHohn/kube-proxy-migration-tests

Automatic merge from submit-queue (batch tested with PRs 51733, 51838)

Decouple kube-proxy upgrade/downgrade tests from upgradeTests

**What this PR does / why we need it**:

Fixes the failing kube-proxy migration CI jobs:
- https://k8s-testgrid.appspot.com/sig-network#gci-gce-latest-upgrade-kube-proxy-ds
- https://k8s-testgrid.appspot.com/sig-network#gci-gce-latest-downgrade-kube-proxy-ds
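In short, the two migration `Describe`s stop driving the shared `upgradeTests` slice and instead own dedicated test lists and frameworks, so these jobs only exercise the checks relevant to the kube-proxy migration. A condensed view of the change (the full diff is below):

```go
// Before: the migration jobs ran every test registered in upgradeTests.
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)

// After: each migration Describe owns a dedicated slice and its own frameworks.
var kubeProxyUpgradeTests = []upgrades.Test{
	&upgrades.KubeProxyUpgradeTest{},
	&upgrades.ServiceUpgradeTest{},
	&upgrades.IngressUpgradeTest{},
}
testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
```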

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #51729

**Special notes for your reviewer**:

/assign @krousey @nicksardo 
Could you please take a look post code-freeze (I believe it fixes the failing jobs)? Thanks!

**Release note**:

```release-note
NONE
```
Authored by Kubernetes Submit Queue on 2017-09-06 00:02:20 -07:00; committed by GitHub
commit 8b9f0ea5de
3 changed files with 242 additions and 6 deletions

View File

@@ -59,6 +59,18 @@ var statefulsetUpgradeTests = []upgrades.Test{
&upgrades.CassandraUpgradeTest{},
}
var kubeProxyUpgradeTests = []upgrades.Test{
&upgrades.KubeProxyUpgradeTest{},
&upgrades.ServiceUpgradeTest{},
&upgrades.IngressUpgradeTest{},
}
var kubeProxyDowngradeTests = []upgrades.Test{
&upgrades.KubeProxyDowngradeTest{},
&upgrades.ServiceUpgradeTest{},
&upgrades.IngressUpgradeTest{},
}
var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")
@@ -219,15 +231,13 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]", func() {
f := framework.NewDefaultFramework("kube-proxy-ds-migration")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(upgradeTests)
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
})
Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() {
testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
@@ -248,11 +258,13 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, true))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() {
testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
@@ -274,7 +286,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
runUpgradeSuite(f, kubeProxyDowngradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
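With the lists decoupled, any further coverage for the migration jobs goes into `kubeProxyUpgradeTests`/`kubeProxyDowngradeTests` rather than into the shared `upgradeTests`. A hypothetical example (the appended test is illustrative only, not part of this PR):

```go
var kubeProxyUpgradeTests = []upgrades.Test{
	&upgrades.KubeProxyUpgradeTest{},
	&upgrades.ServiceUpgradeTest{},
	&upgrades.IngressUpgradeTest{},
	// Hypothetical addition: new checks land here, leaving the generic
	// cluster-upgrade suite driven by upgradeTests untouched.
	&upgrades.ExampleUpgradeTest{},
}
```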

View File

@@ -14,6 +14,7 @@ go_library(
"etcd.go",
"horizontal_pod_autoscalers.go",
"ingress.go",
"kube_proxy_migration.go",
"mysql.go",
"secrets.go",
"services.go",
@@ -32,10 +33,13 @@ go_library(
"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

View File

@@ -0,0 +1,220 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"time"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTestTimeout = time.Duration(5 * time.Minute)
clusterAddonLabelKey = "k8s-app"
clusterComponentKey = "component"
kubeProxyLabelName = "kube-proxy"
)
// KubeProxyUpgradeTest tests kube-proxy static pods -> DaemonSet upgrade path.
type KubeProxyUpgradeTest struct {
}
func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
// Setup verifies that kube-proxy static pods are running before the upgrade.
func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred())
}
// Test validates that kube-proxy is migrated from static pods to a DaemonSet.
func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy static pods disappear")
Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred())
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred())
}
// Teardown does nothing.
func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
}
// KubeProxyDowngradeTest tests kube-proxy DaemonSet -> static pods downgrade path.
type KubeProxyDowngradeTest struct {
}
func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
// Setup verifies that the kube-proxy DaemonSet is running before the downgrade.
func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred())
}
// Test validates that kube-proxy is migrated from a DaemonSet to static pods.
func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy DaemonSet disappear")
Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred())
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred())
}
// Teardown does nothing.
func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
}
func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
numberkubeProxyPods := 0
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodRunning {
numberkubeProxyPods = numberkubeProxyPods + 1
}
}
if numberkubeProxyPods != numberSchedulableNodes {
framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods running: %v", err)
}
return nil
}
func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
if len(pods.Items) != 0 {
framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods disappear: %v", err)
}
return nil
}
func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 1 {
framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
if numberkubeProxyPods != numberSchedulableNodes {
framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %v", err)
}
return nil
}
func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 0 {
framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %v", err)
}
return nil
}
func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
return c.Core().Pods(metav1.NamespaceSystem).List(listOpts)
}
func getKubeProxyDaemonSet(c clientset.Interface) (*extensions.DaemonSetList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
return c.Extensions().DaemonSets(metav1.NamespaceSystem).List(listOpts)
}
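
All four wait helpers above follow the same polling pattern: a `wait.ConditionFunc` closure that returns `(false, nil)` to keep polling (including on transient API errors) and `(true, nil)` once the desired state is reached, handed to `wait.PollImmediate` with a 5-second interval and the 5-minute timeout. A minimal standalone sketch of that pattern, reusing the same client calls as the file above (the helper name and parameters are illustrative, not part of the PR):

```go
package upgrades

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForLabeledPodsGone is an illustrative generalization of the helpers
// above: it polls until no kube-system pods match labelSelector, or until
// timeout expires. Transient list errors return (false, nil) so the poll
// simply retries on the next tick.
func waitForLabeledPodsGone(c clientset.Interface, labelSelector string, timeout time.Duration) error {
	condition := func() (bool, error) {
		pods, err := c.Core().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			framework.Logf("Failed to list pods: %v", err)
			return false, nil // retry on the next poll
		}
		return len(pods.Items) == 0, nil
	}
	if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil {
		return fmt.Errorf("pods matching %q still present after %v: %v", labelSelector, timeout, err)
	}
	return nil
}
```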