Merge pull request #40325 from krousey/upgrades

Automatic merge from submit-queue (batch tested with PRs 39260, 40216, 40213, 40325, 40333)

Adding framework to allow multiple upgrade tests

**What this PR does / why we need it**: This adds a framework that allows multiple tests to run during an upgrade, and moves the existing services test onto that framework.
Committed by Kubernetes Submit Queue on 2017-01-24 16:26:04 -08:00 (via GitHub), commit 2229b1cf13
5 changed files with 238 additions and 110 deletions
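For orientation before the diffs: a new check plugs into this framework by implementing the `upgrades.Test` interface added in this PR (see upgrade.go below) and appending an instance to the `upgradeTests` slice in the cluster upgrade e2e file. A minimal sketch; `SecretUpgradeTest` and its body are hypothetical, only the interface shape comes from the PR:

```go
package upgrades

import "k8s.io/kubernetes/test/e2e/framework"

// SecretUpgradeTest is a hypothetical example of a check that plugs into
// the new framework by satisfying the upgrades.Test interface.
type SecretUpgradeTest struct {
	name string
}

// Setup creates whatever objects the check needs before the upgrade starts.
func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
	// e.g. create a Secret via f.ClientSet and record its name in t.name
}

// Test runs while the upgrade proceeds; done closes when the upgrade
// finishes, after which final validation can run.
func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	<-done
	// e.g. read the Secret back and verify its contents survived
}

// Teardown cleans up anything the framework's namespace deletion misses.
func (t *SecretUpgradeTest) Teardown(f *framework.Framework) {}
```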


@@ -153,6 +153,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
@@ -248,6 +249,7 @@ filegroup(
"//test/e2e/generated:all-srcs",
"//test/e2e/perftype:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
"//test/e2e/upgrades:all-srcs",
],
tags = ["automanaged"],
)


@@ -21,34 +21,37 @@ import (
"path"
"strings"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
)
// TODO(mikedanese): Add setup, validate, and teardown for:
// - secrets
// - volumes
// - persistent volumes
var upgradeTests = []upgrades.Test{
&upgrades.ServiceUpgradeTest{},
}
var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")
framework.KubeDescribe("master upgrade", func() {
It("should maintain responsive services [Feature:MasterUpgrade]", func() {
It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
for _, t := range upgradeTests {
cm.RegisterInterface(&chaosMonkeyAdapter{
test: t,
framework: f,
upgradeType: upgrades.MasterUpgrade,
})
}
cm.Do()
})
})
@@ -61,24 +64,13 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalNodeUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
for _, t := range upgradeTests {
cm.RegisterInterface(&chaosMonkeyAdapter{
test: t,
framework: f,
upgradeType: upgrades.NodeUpgrade,
})
}
cm.Do()
})
})
@@ -93,31 +85,36 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
for _, t := range upgradeTests {
cm.RegisterInterface(&chaosMonkeyAdapter{
test: t,
framework: f,
upgradeType: upgrades.ClusterUpgrade,
})
}
cm.Do()
})
})
})
type chaosMonkeyAdapter struct {
test upgrades.Test
framework *framework.Framework
upgradeType upgrades.UpgradeType
}
func (cma *chaosMonkeyAdapter) Setup() {
cma.test.Setup(cma.framework)
}
func (cma *chaosMonkeyAdapter) Test(stopCh <-chan struct{}) {
cma.test.Test(cma.framework, stopCh, cma.upgradeType)
}
func (cma *chaosMonkeyAdapter) Teardown() {
cma.test.Teardown(cma.framework)
}
// realVersion turns a version constant s into a version string deployable on
// GKE. See hack/get-build.sh for more information.
func realVersion(s string) (string, error) {
@@ -130,67 +127,6 @@ func realVersion(s string) (string, error) {
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil
}
func testServiceUpBeforeAndAfter(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testService(f, sem, false)
}
func testServiceRemainsUp(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testService(f, sem, true)
}
// testService is a helper for testServiceUpBeforeAndAfter and testServiceRemainsUp with a flag for testDuringDisruption
//
// TODO(ihmccreery) remove this abstraction once testServiceUpBeforeAndAfter is no longer needed, because node upgrades
// maintain a responsive service.
func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
// Setup
serviceName := "service-test"
jig := framework.NewServiceTestJig(f.ClientSet, serviceName)
// nodeIP := framework.PickNodeIP(jig.Client) // for later
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
// TODO it's weird that we have to do this and then wait WaitForLoadBalancer which changes
// tcpService.
tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
// Get info to hit it with
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName)
// TODO newRCTemplate only allows for the creation of one replica... that probably won't
// work so well.
jig.RunOrFail(f.Namespace.Name, nil)
// Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
sem.Ready()
if testDuringDisruption {
// Continuous validation
wait.Until(func() {
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.Poll)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without checking if service remains up")
<-sem.StopCh
}
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
}
func checkMasterVersion(c clientset.Interface, want string) error {
framework.Logf("Checking master version")
v, err := c.Discovery().ServerVersion()

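The chaosMonkeyAdapter above exists to bridge the new `upgrades.Test` interface to whatever `cm.RegisterInterface` expects. Inferring from the adapter's method set, that registration type presumably looks roughly like this (a sketch, not the actual test/e2e/chaosmonkey source):

```go
// Inferred shape only; the real definition lives in test/e2e/chaosmonkey.
type Interface interface {
	Setup()                      // called before the disruption begins
	Test(stopCh <-chan struct{}) // runs during the disruption; stopCh closes when it ends
	Teardown()                   // called after the disruption completes
}
```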
test/e2e/upgrades/BUILD Normal file

@@ -0,0 +1,36 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"services.go",
"upgrade.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor:github.com/onsi/ginkgo",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

test/e2e/upgrades/services.go Normal file

@@ -0,0 +1,102 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// ServiceUpgradeTest tests that a service is available before and
// after a cluster upgrade. During a master-only upgrade, it will test
// that a service remains available during the upgrade.
type ServiceUpgradeTest struct {
jig *framework.ServiceTestJig
tcpService *v1.Service
tcpIngressIP string
svcPort int
}
// Setup creates a service with a load balancer and makes sure it's reachable.
func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
serviceName := "service-test"
jig := framework.NewServiceTestJig(f.ClientSet, serviceName)
// Grab a unique namespace so we don't collide.
ns, err := f.CreateNamespace("service-upgrade", nil)
framework.ExpectNoError(err)
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
// Get info to hit it with
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName)
jig.RunOrFail(ns.Name, nil)
// Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig = jig
t.tcpService = tcpService
t.tcpIngressIP = tcpIngressIP
t.svcPort = svcPort
}
// Test runs a connectivity check to the service.
func (t *ServiceUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
t.test(f, done, true)
default:
t.test(f, done, false)
}
}
// Teardown cleans up any remaining resources.
func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
// Continuous validation
By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.Poll)
}, framework.Poll, done)
} else {
// Block until upgrade is done
By("waiting for upgrade to finish without checking if service remains up")
<-done
}
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
}
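The continuous-validation branch leans on `wait.Until` from k8s.io/apimachinery, which invokes a function, sleeps for the period, and repeats until the stop channel closes. A standalone illustration of that shape (the probe and timings here are made up):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(3 * time.Second) // stand-in for the upgrade finishing
		close(done)
	}()

	// Probe once a second until done closes, mirroring how
	// ServiceUpgradeTest hits the LoadBalancer during a master upgrade.
	wait.Until(func() {
		fmt.Println("probe: service still reachable")
	}, time.Second, done)

	fmt.Println("upgrade finished; running final validation")
}
```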

test/e2e/upgrades/upgrade.go Normal file

@@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package upgrades provides a framework for testing Kubernetes
// features before, during, and after different types of upgrades.
package upgrades
import "k8s.io/kubernetes/test/e2e/framework"
// UpgradeType represents different types of upgrades.
type UpgradeType int
const (
// MasterUpgrade indicates that only the master is being upgraded.
MasterUpgrade UpgradeType = iota
// NodeUpgrade indicates that only the nodes are being upgraded.
NodeUpgrade
// ClusterUpgrade indicates that both master and nodes are
// being upgraded.
ClusterUpgrade
)
// Test is an interface for upgrade tests.
type Test interface {
// Setup should create and verify whatever objects need to
// exist before the upgrade disruption starts.
Setup(f *framework.Framework)
// Test will run during the upgrade. When the upgrade is
// complete, done will be closed and final validation can
// begin.
Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
// Teardown should clean up any created objects that the
// framework does not already clean up.
Teardown(f *framework.Framework)
}
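Putting the pieces together, the lifecycle that chaosmonkey drives for each registered test presumably looks like the following (a sketch inferred from the cluster upgrade file in this PR, not chaosmonkey's real implementation; the function name and structure are illustrative):

```go
package upgrades

import (
	"sync"

	"k8s.io/kubernetes/test/e2e/framework"
)

// runUpgradeSuite sketches the Setup -> Test (concurrent with the
// upgrade) -> Teardown flow this framework implies.
func runUpgradeSuite(upgrade func(), tests []Test, f *framework.Framework, ut UpgradeType) {
	for _, t := range tests {
		t.Setup(f) // create and verify objects before disruption
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	for _, t := range tests {
		wg.Add(1)
		go func(t Test) {
			defer wg.Done()
			t.Test(f, done, ut) // validates during and/or after the upgrade
		}(t)
	}

	upgrade()   // e.g. framework.MasterUpgrade(v)
	close(done) // signal every Test that the upgrade is complete
	wg.Wait()

	for _, t := range tests {
		t.Teardown(f)
	}
}
```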