fix golint failures for test/e2e/upgrades

danielqsj committed 2019-02-25 16:36:26 +08:00
parent 7c8498ab03
commit 6322025d5c
14 changed files with 184 additions and 168 deletions
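
Most of the diff below repeats the same two golint fixes in every file: the dot-imports of ginkgo and gomega are replaced with named imports, so each By/Expect/Fail call becomes ginkgo.By/gomega.Expect/ginkgo.Fail, and exported identifiers such as the test types and their Name/Skip/Setup/Test methods gain doc comments. A minimal sketch of that before/after pattern, using a hypothetical FooUpgradeTest type rather than code from this commit:

	// Before (both patterns are flagged by golint: "should not use dot imports"
	// and "exported method FooUpgradeTest.Name should have comment or be unexported"):
	//
	//	import (
	//		. "github.com/onsi/ginkgo"
	//		. "github.com/onsi/gomega"
	//	)
	//
	//	func (FooUpgradeTest) Name() string { return "foo-upgrade" }
	//
	// After:
	package upgrades

	import (
		"github.com/onsi/ginkgo"
		"github.com/onsi/gomega"
	)

	// FooUpgradeTest is a hypothetical example type, not part of this commit.
	type FooUpgradeTest struct{}

	// Name returns the tracking name of the test.
	func (FooUpgradeTest) Name() string { return "foo-upgrade" }

	// Setup shows the qualified ginkgo/gomega calls used throughout the rewrite.
	func (t *FooUpgradeTest) Setup() {
		ginkgo.By("setting up the hypothetical test")
		gomega.Expect(t).NotTo(gomega.BeNil())
	}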

View File

@ -687,7 +687,6 @@ test/e2e/storage/testsuites
test/e2e/storage/utils test/e2e/storage/utils
test/e2e/storage/vsphere test/e2e/storage/vsphere
test/e2e/ui test/e2e/ui
test/e2e/upgrades
test/e2e/windows test/e2e/windows
test/e2e_kubeadm test/e2e_kubeadm
test/e2e_node test/e2e_node
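
Dropping test/e2e/upgrades from this list (the golint exemption list, hack/.golint_failures in the kubernetes repo) is what makes the rest of the commit necessary: once the entry is gone, the lint verification run fails on any remaining golint warning in the package, so every exported symbol below needs its doc comment and the dot-imports have to go.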

View File

@ -66,7 +66,7 @@ var gpuUpgradeTests = []upgrades.Test{
} }
var statefulsetUpgradeTests = []upgrades.Test{ var statefulsetUpgradeTests = []upgrades.Test{
&upgrades.MySqlUpgradeTest{}, &upgrades.MySQLUpgradeTest{},
&upgrades.EtcdUpgradeTest{}, &upgrades.EtcdUpgradeTest{},
&upgrades.CassandraUpgradeTest{}, &upgrades.CassandraUpgradeTest{},
} }
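
The MySqlUpgradeTest → MySQLUpgradeTest rename in this hunk (and the Ip → IP field rename in the ingress test further down) comes from golint's initialism check, which expects common initialisms such as SQL, IP, ID and URL to be written in a consistent case. A hypothetical illustration, not taken from this commit:

	package example

	// MySqlClient is flagged by golint ("type MySqlClient should be MySQLClient";
	// "struct field Ip should be IP").
	type MySqlClient struct{ Ip string }

	// MySQLClient is the spelling the initialism check accepts.
	type MySQLClient struct{ IP string }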

View File

@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct" "github.com/onsi/gomega/gstruct"
) )
@ -32,8 +32,10 @@ type AppArmorUpgradeTest struct {
pod *api.Pod pod *api.Pod
} }
// Name returns the tracking name of the test.
func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" } func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
// Skip returns true when this test can be skipped.
func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool { func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
supportedImages := make(map[string]bool) supportedImages := make(map[string]bool)
for _, d := range common.AppArmorDistros { for _, d := range common.AppArmorDistros {
@ -50,11 +52,11 @@ func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
// Setup creates a secret and then verifies that a pod can consume it. // Setup creates a secret and then verifies that a pod can consume it.
func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) { func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
By("Loading AppArmor profiles to nodes") ginkgo.By("Loading AppArmor profiles to nodes")
common.LoadAppArmorProfiles(f) common.LoadAppArmorProfiles(f)
// Create the initial test pod. // Create the initial test pod.
By("Creating a long-running AppArmor enabled pod.") ginkgo.By("Creating a long-running AppArmor enabled pod.")
t.pod = common.CreateAppArmorTestPod(f, false, false) t.pod = common.CreateAppArmorTestPod(f, false, false)
// Verify initial state. // Verify initial state.
@ -76,32 +78,32 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) { func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything // rely on the namespace deletion to clean up everything
By("Logging container failures") ginkgo.By("Logging container failures")
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
} }
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
By("Verifying an AppArmor profile is continuously enforced for a pod") ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{}) pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod") framework.ExpectNoError(err, "Should be able to get pod")
Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running") gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running") gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted") gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
} }
func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
By("Verifying an AppArmor profile is enforced for a new pod") ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
common.CreateAppArmorTestPod(f, false, true) common.CreateAppArmorTestPod(f, false, true)
} }
func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
By("Verifying nodes are AppArmor enabled") ginkgo.By("Verifying nodes are AppArmor enabled")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list nodes") framework.ExpectNoError(err, "Failed to list nodes")
for _, node := range nodes.Items { for _, node := range nodes.Items {
Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{ gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
"Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ "Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
"Message": ContainSubstring("AppArmor enabled"), "Message": gomega.ContainSubstring("AppArmor enabled"),
}), }),
})) }))
} }

View File

@ -25,8 +25,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func cassandraKubectlCreate(ns, file string) { func cassandraKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
@ -75,16 +75,16 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB") ginkgo.By("Creating a PDB")
cassandraKubectlCreate(ns, "pdb.yaml") cassandraKubectlCreate(ns, "pdb.yaml")
By("Creating a Cassandra StatefulSet") ginkgo.By("Creating a Cassandra StatefulSet")
t.ssTester.CreateStatefulSet(cassandraManifestPath, ns) t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
By("Creating a cassandra-test-server deployment") ginkgo.By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml") cassandraKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services") ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -95,18 +95,18 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 dummy users") ginkgo.By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2 t.successfulWrites = 2
By("Verifying that the users exist") ginkgo.By("Verifying that the users exist")
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users)).To(Equal(2)) gomega.Expect(len(users)).To(gomega.Equal(2))
} }
// listUsers gets a list of users from the db via the tester service. // listUsers gets a list of users from the db via the tester service.
@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
// getServiceIP is a helper method to extract the Ingress IP from the service. // getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -165,7 +165,7 @@ func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName
// ratio is over a certain threshold (0.75). We also verify that we get // ratio is over a certain threshold (0.75). We also verify that we get
// at least the same number of rows back as we successfully wrote. // at least the same number of rows back as we successfully wrote.
func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
var ( var (
success, failures, writeAttempts, lastUserCount int success, failures, writeAttempts, lastUserCount int
mu sync.Mutex mu sync.Mutex
@ -199,19 +199,19 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
}, 10*time.Millisecond, done) }, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites) framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue()) gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures) ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio) framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts) ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio) framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors) framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs. // TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue()) gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
} }
// Teardown does one final check of the data's availability. // Teardown does one final check of the data's availability.
func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) { func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue()) gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
} }

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
) )
@ -34,6 +34,7 @@ type ConfigMapUpgradeTest struct {
configMap *v1.ConfigMap configMap *v1.ConfigMap
} }
// Name returns the tracking name of the test.
func (ConfigMapUpgradeTest) Name() string { func (ConfigMapUpgradeTest) Name() string {
return "[sig-storage] [sig-api-machinery] configmap-upgrade" return "[sig-storage] [sig-api-machinery] configmap-upgrade"
} }
@ -54,13 +55,13 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
}, },
} }
By("Creating a ConfigMap") ginkgo.By("Creating a ConfigMap")
var err error var err error
if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil { if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
} }
By("Making sure the ConfigMap is consumable") ginkgo.By("Making sure the ConfigMap is consumable")
t.testPod(f) t.testPod(f)
} }
@ -68,7 +69,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the ConfigMap. // pod can still consume the ConfigMap.
func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Consuming the ConfigMap after upgrade") ginkgo.By("Consuming the ConfigMap after upgrade")
t.testPod(f) t.testPod(f)
} }

View File

@ -25,8 +25,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -37,14 +37,17 @@ import (
const manifestPath = "test/e2e/testing-manifests/statefulset/etcd" const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
// EtcdUpgradeTest tests that etcd is writable before and after a cluster upgrade.
type EtcdUpgradeTest struct { type EtcdUpgradeTest struct {
ip string ip string
successfulWrites int successfulWrites int
ssTester *framework.StatefulSetTester ssTester *framework.StatefulSetTester
} }
// Name returns the tracking name of the test.
func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" } func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" }
// Skip returns true when this test can be skipped.
func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool { func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.6.0") minVersion := version.MustParseSemantic("1.6.0")
for _, vCtx := range upgCtx.Versions { for _, vCtx := range upgCtx.Versions {
@ -56,26 +59,27 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func kubectlCreate(ns, file string) { func kubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
// Setup creates etcd statefulset and then verifies that the etcd is writable.
func (t *EtcdUpgradeTest) Setup(f *framework.Framework) { func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB") ginkgo.By("Creating a PDB")
kubectlCreate(ns, "pdb.yaml") kubectlCreate(ns, "pdb.yaml")
By("Creating an etcd StatefulSet") ginkgo.By("Creating an etcd StatefulSet")
t.ssTester.CreateStatefulSet(manifestPath, ns) t.ssTester.CreateStatefulSet(manifestPath, ns)
By("Creating an etcd--test-server deployment") ginkgo.By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml") kubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services") ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -86,18 +90,18 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 dummy users") ginkgo.By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2 t.successfulWrites = 2
By("Verifying that the users exist") ginkgo.By("Verifying that the users exist")
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users)).To(Equal(2)) gomega.Expect(len(users)).To(gomega.Equal(2))
} }
func (t *EtcdUpgradeTest) listUsers() ([]string, error) { func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@ -139,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -147,8 +151,9 @@ func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName strin
return ingress[0].IP return ingress[0].IP
} }
// Test waits for upgrade to complete and verifies if etcd is writable.
func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
var ( var (
success, failures, writeAttempts, lastUserCount int success, failures, writeAttempts, lastUserCount int
mu sync.Mutex mu sync.Mutex
@ -182,19 +187,19 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
}, 10*time.Millisecond, done) }, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites) framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue()) gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures) ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio) framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts) ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio) framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors) framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs. // TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue()) gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
} }
// Teardown does one final check of the data's availability. // Teardown does one final check of the data's availability.
func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) { func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue()) gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
} }

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade. // HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade.
@ -33,9 +33,10 @@ type HPAUpgradeTest struct {
hpa *autoscalingv1.HorizontalPodAutoscaler hpa *autoscalingv1.HorizontalPodAutoscaler
} }
// Name returns the tracking name of the test.
func (HPAUpgradeTest) Name() string { return "hpa-upgrade" } func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
// Creates a resource consumer and an HPA object that autoscales the consumer. // Setup creates a resource consumer and an HPA object that autoscales the consumer.
func (t *HPAUpgradeTest) Setup(f *framework.Framework) { func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
t.rc = common.NewDynamicResourceConsumer( t.rc = common.NewDynamicResourceConsumer(
"res-cons-upgrade", "res-cons-upgrade",
@ -63,7 +64,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
// Test waits for upgrade to complete and verifies if HPA works correctly. // Test waits for upgrade to complete and verifies if HPA works correctly.
func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
// Block until upgrade is done // Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA")) ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
<-done <-done
t.test() t.test()
} }
@ -79,19 +80,19 @@ func (t *HPAUpgradeTest) test() {
const timeToWait = 15 * time.Minute const timeToWait = 15 * time.Minute
t.rc.Resume() t.rc.Resume()
By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1.")) ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
t.rc.ConsumeCPU(10) /* millicores */ t.rc.ConsumeCPU(10) /* millicores */
By(fmt.Sprintf("HPA waits for 1 replica")) ginkgo.By(fmt.Sprintf("HPA waits for 1 replica"))
t.rc.WaitForReplicas(1, timeToWait) t.rc.WaitForReplicas(1, timeToWait)
By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores.")) ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
t.rc.ConsumeCPU(250) /* millicores */ t.rc.ConsumeCPU(250) /* millicores */
By(fmt.Sprintf("HPA waits for 3 replicas")) ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas"))
t.rc.WaitForReplicas(3, timeToWait) t.rc.WaitForReplicas(3, timeToWait)
By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5.")) ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
t.rc.ConsumeCPU(700) /* millicores */ t.rc.ConsumeCPU(700) /* millicores */
By(fmt.Sprintf("HPA waits for 5 replicas")) ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas"))
t.rc.WaitForReplicas(5, timeToWait) t.rc.WaitForReplicas(5, timeToWait)
// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail. // We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.

View File

@ -24,7 +24,7 @@ import (
"reflect" "reflect"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
compute "google.golang.org/api/compute/v1" compute "google.golang.org/api/compute/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -61,10 +61,11 @@ type GCPResourceStore struct {
TpsList []*compute.TargetHttpsProxy TpsList []*compute.TargetHttpsProxy
SslList []*compute.SslCertificate SslList []*compute.SslCertificate
BeList []*compute.BackendService BeList []*compute.BackendService
Ip *compute.Address IP *compute.Address
IgList []*compute.InstanceGroup IgList []*compute.InstanceGroup
} }
// Name returns the tracking name of the test.
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" } func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
// Setup creates a GLBC, allocates an ip, and an ingress resource, // Setup creates a GLBC, allocates an ip, and an ingress resource,
@ -96,17 +97,17 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
t.ip = t.gceController.CreateStaticIP(t.ipName) t.ip = t.gceController.CreateStaticIP(t.ipName)
// Create a working basic Ingress // Create a working basic Ingress
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip)) ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{ jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
ingress.IngressStaticIPKey: t.ipName, ingress.IngressStaticIPKey: t.ipName,
ingress.IngressAllowHTTPKey: "false", ingress.IngressAllowHTTPKey: "false",
}, map[string]string{}) }, map[string]string{})
t.jig.SetHTTPS("tls-secret", "ingress.test.com") t.jig.SetHTTPS("tls-secret", "ingress.test.com")
By("waiting for Ingress to come up with ip: " + t.ip) ginkgo.By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
By("keeping track of GCP resources created by Ingress") ginkgo.By("keeping track of GCP resources created by Ingress")
t.resourceStore = &GCPResourceStore{} t.resourceStore = &GCPResourceStore{}
t.populateGCPResourceStore(t.resourceStore) t.populateGCPResourceStore(t.resourceStore)
} }
@ -134,18 +135,18 @@ func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) { func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(t.gceController.Ns) framework.DescribeIng(t.gceController.Ns)
} }
if t.jig.Ingress != nil { if t.jig.Ingress != nil {
By("Deleting ingress") ginkgo.By("Deleting ingress")
t.jig.TryDeleteIngress() t.jig.TryDeleteIngress()
} else { } else {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
} }
By("Cleaning up cloud resources") ginkgo.By("Cleaning up cloud resources")
framework.ExpectNoError(t.gceController.CleanupGCEIngressController()) framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
} }
@ -171,20 +172,20 @@ func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) { func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption { if testDuringDisruption {
By("continuously hitting the Ingress IP") ginkgo.By("continuously hitting the Ingress IP")
wait.Until(func() { wait.Until(func() {
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}, t.jig.PollInterval, done) }, t.jig.PollInterval, done)
} else { } else {
By("waiting for upgrade to finish without checking if Ingress remains up") ginkgo.By("waiting for upgrade to finish without checking if Ingress remains up")
<-done <-done
} }
By("hitting the Ingress IP " + t.ip) ginkgo.By("hitting the Ingress IP " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
// We want to manually trigger a sync because then we can easily verify // We want to manually trigger a sync because then we can easily verify
// a correct sync completed after update. // a correct sync completed after update.
By("updating ingress spec to manually trigger a sync") ginkgo.By("updating ingress spec to manually trigger a sync")
t.jig.Update(func(ing *extensions.Ingress) { t.jig.Update(func(ing *extensions.Ingress) {
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append( ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths, ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
@ -197,7 +198,7 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}
// WaitForIngress() tests that all paths are pinged, which is how we know // WaitForIngress() tests that all paths are pinged, which is how we know
// everything is synced with the cloud. // everything is synced with the cloud.
t.jig.WaitForIngress(false) t.jig.WaitForIngress(false)
By("comparing GCP resources post-upgrade") ginkgo.By("comparing GCP resources post-upgrade")
postUpgradeResourceStore := &GCPResourceStore{} postUpgradeResourceStore := &GCPResourceStore{}
t.populateGCPResourceStore(postUpgradeResourceStore) t.populateGCPResourceStore(postUpgradeResourceStore)
@ -238,7 +239,7 @@ func (t *IngressUpgradeTest) populateGCPResourceStore(resourceStore *GCPResource
resourceStore.TpsList = cont.ListTargetHttpsProxies() resourceStore.TpsList = cont.ListTargetHttpsProxies()
resourceStore.SslList = cont.ListSslCertificates() resourceStore.SslList = cont.ListSslCertificates()
resourceStore.BeList = cont.ListGlobalBackendServices() resourceStore.BeList = cont.ListGlobalBackendServices()
resourceStore.Ip = cont.GetGlobalAddress(t.ipName) resourceStore.IP = cont.GetGlobalAddress(t.ipName)
resourceStore.IgList = cont.ListInstanceGroups() resourceStore.IgList = cont.ListInstanceGroups()
} }

View File

@ -28,8 +28,8 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -43,12 +43,13 @@ const (
type KubeProxyUpgradeTest struct { type KubeProxyUpgradeTest struct {
} }
// Name returns the tracking name of the test.
func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" } func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
// Setup verifies kube-proxy static pods is running before uprgade. // Setup verifies kube-proxy static pods is running before uprgade.
func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) { func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy static pods running and ready") ginkgo.By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
} }
// Test validates if kube-proxy is migrated from static pods to DaemonSet. // Test validates if kube-proxy is migrated from static pods to DaemonSet.
@ -56,14 +57,14 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
c := f.ClientSet c := f.ClientSet
// Block until upgrade is done. // Block until upgrade is done.
By("Waiting for upgrade to finish") ginkgo.By("Waiting for upgrade to finish")
<-done <-done
By("Waiting for kube-proxy static pods disappear") ginkgo.By("Waiting for kube-proxy static pods disappear")
Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy DaemonSet running and ready") ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(gomega.HaveOccurred())
} }
// Teardown does nothing. // Teardown does nothing.
@ -74,12 +75,13 @@ func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
type KubeProxyDowngradeTest struct { type KubeProxyDowngradeTest struct {
} }
// Name returns the tracking name of the test.
func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" } func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
// Setup verifies kube-proxy DaemonSet is running before uprgade. // Setup verifies kube-proxy DaemonSet is running before uprgade.
func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) { func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy DaemonSet running and ready") ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
} }
// Test validates if kube-proxy is migrated from DaemonSet to static pods. // Test validates if kube-proxy is migrated from DaemonSet to static pods.
@ -87,14 +89,14 @@ func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct
c := f.ClientSet c := f.ClientSet
// Block until upgrade is done. // Block until upgrade is done.
By("Waiting for upgrade to finish") ginkgo.By("Waiting for upgrade to finish")
<-done <-done
By("Waiting for kube-proxy DaemonSet disappear") ginkgo.By("Waiting for kube-proxy DaemonSet disappear")
Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy static pods running and ready") ginkgo.By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(gomega.HaveOccurred())
} }
// Teardown does nothing. // Teardown does nothing.

View File

@ -25,8 +25,8 @@ import (
"strconv" "strconv"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -37,17 +37,19 @@ import (
const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade" const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
// MySqlUpgradeTest implements an upgrade test harness that polls a replicated sql database. // MySQLUpgradeTest implements an upgrade test harness that polls a replicated sql database.
type MySqlUpgradeTest struct { type MySQLUpgradeTest struct {
ip string ip string
successfulWrites int successfulWrites int
nextWrite int nextWrite int
ssTester *framework.StatefulSetTester ssTester *framework.StatefulSetTester
} }
func (MySqlUpgradeTest) Name() string { return "mysql-upgrade" } // Name returns the tracking name of the test.
func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }
func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool { // Skip returns true when this test can be skipped.
func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0") minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions { for _, vCtx := range upgCtx.Versions {
@ -59,13 +61,13 @@ func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func mysqlKubectlCreate(ns, file string) { func mysqlKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -77,22 +79,22 @@ func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName stri
// from the db. It then connects to the db with the write Service and populates the db with a table // from the db. It then connects to the db with the write Service and populates the db with a table
// and a few entries. Finally, it connects to the db with the read Service, and confirms the data is // and a few entries. Finally, it connects to the db with the read Service, and confirms the data is
// available. The db connections are left open to be used later in the test. // available. The db connections are left open to be used later in the test.
func (t *MySqlUpgradeTest) Setup(f *framework.Framework) { func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a configmap") ginkgo.By("Creating a configmap")
mysqlKubectlCreate(ns, "configmap.yaml") mysqlKubectlCreate(ns, "configmap.yaml")
By("Creating a mysql StatefulSet") ginkgo.By("Creating a mysql StatefulSet")
t.ssTester.CreateStatefulSet(mysqlManifestPath, ns) t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
By("Creating a mysql-test-server deployment") ginkgo.By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml") mysqlKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the test-service") ginkgo.By("Getting the ingress IPs from the test-service")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -103,24 +105,24 @@ func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 names to the database") ginkgo.By("Adding 2 names to the database")
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred()) gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred()) gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
By("Verifying that the 2 names have been inserted") ginkgo.By("Verifying that the 2 names have been inserted")
count, err := t.countNames() count, err := t.countNames()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(count).To(Equal(2)) gomega.Expect(count).To(gomega.Equal(2))
} }
// Test continually polls the db using the read and write connections, inserting data, and checking // Test continually polls the db using the read and write connections, inserting data, and checking
// that all the data is readable. // that all the data is readable.
func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
var writeSuccess, readSuccess, writeFailure, readFailure int var writeSuccess, readSuccess, writeFailure, readFailure int
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
go wait.Until(func() { go wait.Until(func() {
_, err := t.countNames() _, err := t.countNames()
if err != nil { if err != nil {
@ -162,14 +164,14 @@ func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
} }
// Teardown performs one final check of the data's availability. // Teardown performs one final check of the data's availability.
func (t *MySqlUpgradeTest) Teardown(f *framework.Framework) { func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
count, err := t.countNames() count, err := t.countNames()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(count >= t.successfulWrites).To(BeTrue()) gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
} }
// addName adds a new value to the db. // addName adds a new value to the db.
func (t *MySqlUpgradeTest) addName(name string) error { func (t *MySQLUpgradeTest) addName(name string) error {
val := map[string][]string{"name": {name}} val := map[string][]string{"name": {name}}
t.nextWrite++ t.nextWrite++
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val) r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val)
@ -189,7 +191,7 @@ func (t *MySqlUpgradeTest) addName(name string) error {
// countNames checks to make sure the values in testing.users are available, and returns // countNames checks to make sure the values in testing.users are available, and returns
// the count of them. // the count of them.
func (t *MySqlUpgradeTest) countNames() (int, error) { func (t *MySQLUpgradeTest) countNames() (int, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip)) r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip))
if err != nil { if err != nil {
return 0, err return 0, err

View File

@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/scheduling" "k8s.io/kubernetes/test/e2e/scheduling"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
// NvidiaGPUUpgradeTest tests that gpu resource is available before and after // NvidiaGPUUpgradeTest tests that gpu resource is available before and after
@ -35,12 +35,13 @@ import (
type NvidiaGPUUpgradeTest struct { type NvidiaGPUUpgradeTest struct {
} }
// Name returns the tracking name of the test.
func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" } func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" }
// Setup creates a job requesting gpu. // Setup creates a job requesting gpu.
func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) { func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false) scheduling.SetupNVIDIAGPUNode(f, false)
By("Creating a job requesting gpu") ginkgo.By("Creating a job requesting gpu")
t.startJob(f) t.startJob(f)
} }
@ -48,13 +49,13 @@ func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
// cuda pod started by the gpu job can successfully finish. // cuda pod started by the gpu job can successfully finish.
func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Verifying gpu job success") ginkgo.By("Verifying gpu job success")
t.verifyJobPodSuccess(f) t.verifyJobPodSuccess(f)
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade { if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
// MasterUpgrade should be totally hitless. // MasterUpgrade should be totally hitless.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add") job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(job.Status.Failed).To(BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed) gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
} }
} }
@ -85,12 +86,12 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
} }
ns := f.Namespace.Name ns := f.Namespace.Name
_, err := framework.CreateJob(f.ClientSet, ns, testJob) _, err := framework.CreateJob(f.ClientSet, ns, testJob)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Created job %v", testJob) framework.Logf("Created job %v", testJob)
By("Waiting for gpu job pod start") ginkgo.By("Waiting for gpu job pod start")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1) err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Done with gpu job pod start") ginkgo.By("Done with gpu job pod start")
} }
// verifyJobPodSuccess verifies that the started cuda pod successfully passes. // verifyJobPodSuccess verifies that the started cuda pod successfully passes.
@ -98,9 +99,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
// Wait for client pod to complete. // Wait for client pod to complete.
ns := f.Namespace.Name ns := f.Namespace.Name
err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1) err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add") pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
createdPod := pods.Items[0].Name createdPod := pods.Items[0].Name
framework.Logf("Created pod %v", createdPod) framework.Logf("Created pod %v", createdPod)
f.PodClient().WaitForSuccess(createdPod, 5*time.Minute) f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
@ -108,5 +109,5 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
framework.ExpectNoError(err, "Should be able to get pod logs") framework.ExpectNoError(err, "Should be able to get pod logs")
framework.Logf("Got pod logs: %v", logs) framework.Logf("Got pod logs: %v", logs)
regex := regexp.MustCompile("PASSED") regex := regexp.MustCompile("PASSED")
Expect(regex.MatchString(logs)).To(BeTrue()) gomega.Expect(regex.MatchString(logs)).To(gomega.BeTrue())
} }

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// SecretUpgradeTest test that a secret is available before and after // SecretUpgradeTest test that a secret is available before and after
@ -34,6 +34,7 @@ type SecretUpgradeTest struct {
secret *v1.Secret secret *v1.Secret
} }
// Name returns the tracking name of the test.
func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" } func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" }
// Setup creates a secret and then verifies that a pod can consume it. // Setup creates a secret and then verifies that a pod can consume it.
@ -52,13 +53,13 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
}, },
} }
By("Creating a secret") ginkgo.By("Creating a secret")
var err error var err error
if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil { if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
} }
By("Making sure the secret is consumable") ginkgo.By("Making sure the secret is consumable")
t.testPod(f) t.testPod(f)
} }
@ -66,7 +67,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the secret. // pod can still consume the secret.
func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Consuming the secret after upgrade") ginkgo.By("Consuming the secret after upgrade")
t.testPod(f) t.testPod(f)
} }

View File

@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// ServiceUpgradeTest tests that a service is available before and // ServiceUpgradeTest tests that a service is available before and
@ -34,6 +34,7 @@ type ServiceUpgradeTest struct {
svcPort int svcPort int
} }
// Name returns the tracking name of the test.
func (ServiceUpgradeTest) Name() string { return "service-upgrade" } func (ServiceUpgradeTest) Name() string { return "service-upgrade" }
func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") } func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
@ -45,7 +46,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace ns := f.Namespace
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name) ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) { tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer s.Spec.Type = v1.ServiceTypeLoadBalancer
}) })
@ -56,16 +57,16 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port) svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName) ginkgo.By("creating pod to be part of service " + serviceName)
rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity) rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity)
if shouldTestPDBs() { if shouldTestPDBs() {
By("creating a PodDisruptionBudget to cover the ReplicationController") ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
jig.CreatePDBOrFail(ns.Name, rc) jig.CreatePDBOrFail(ns.Name, rc)
} }
// Hit it once before considering ourselves ready // Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer") ginkgo.By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig = jig t.jig = jig
@ -95,18 +96,18 @@ func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) { func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption { if testDuringDisruption {
// Continuous validation // Continuous validation
By("continuously hitting the pod through the service's LoadBalancer") ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() { wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
}, framework.Poll, done) }, framework.Poll, done)
} else { } else {
// Block until upgrade is done // Block until upgrade is done
By("waiting for upgrade to finish without checking if service remains up") ginkgo.By("waiting for upgrade to finish without checking if service remains up")
<-done <-done
} }
// Sanity check and hit it once more // Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer") ginkgo.By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer) t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
} }

View File

@ -19,8 +19,8 @@ package upgrades
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
@ -32,7 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
// SecretUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During // SysctlUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
// a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is // a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is
// expected to keep failing before and after the upgrade. // expected to keep failing before and after the upgrade.
type SysctlUpgradeTest struct { type SysctlUpgradeTest struct {
@ -53,19 +53,19 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
<-done <-done
switch upgrade { switch upgrade {
case MasterUpgrade, ClusterUpgrade: case MasterUpgrade, ClusterUpgrade:
By("Checking the safe sysctl pod keeps running on master upgrade") ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(pod.Status.Phase).To(Equal(v1.PodRunning)) gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
} }
By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
if err == nil { if err == nil {
Expect(pod.Status.Phase).NotTo(Equal(v1.PodRunning)) gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
} }
t.verifySafeSysctlWork(f) t.verifySafeSysctlWork(f)
@ -78,15 +78,15 @@ func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) {
} }
func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod { func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod {
By("Creating a pod with safe sysctls") ginkgo.By("Creating a pod with safe sysctls")
safeSysctl := "net.ipv4.ip_local_port_range" safeSysctl := "net.ipv4.ip_local_port_range"
safeSysctlValue := "1024 1042" safeSysctlValue := "1024 1042"
validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue}) validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue})
validPod = f.PodClient().Create(t.validPod) validPod = f.PodClient().Create(t.validPod)
By("Making sure the valid pod launches") ginkgo.By("Making sure the valid pod launches")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod) ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason { if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12") framework.Skipf("No sysctl support in Docker <1.12")
} }
@ -96,19 +96,19 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod
} }
func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod { func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod {
By("Creating a pod with unsafe sysctls") ginkgo.By("Creating a pod with unsafe sysctls")
invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{ invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{
"fs.mount-max": "1000000", "fs.mount-max": "1000000",
}) })
invalidPod = f.PodClient().Create(invalidPod) invalidPod = f.PodClient().Create(invalidPod)
By("Making sure the invalid pod failed") ginkgo.By("Making sure the invalid pod failed")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod) ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason { if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12") framework.Skipf("No sysctl support in Docker <1.12")
} }
Expect(ev.Reason).To(Equal(sysctl.ForbiddenReason)) gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
return invalidPod return invalidPod
} }
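
One more golint rule shows up in this last file: the comment on an exported type has to start with the type's own name, which is why the stale "SecretUpgradeTest" comment above is corrected to "SysctlUpgradeTest" alongside the import and assertion rewrites.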