Merge pull request #74508 from danielqsj/uapps

Fix golint failures for e2e/upgrades/...
Kubernetes Prow Robot 2019-02-25 21:48:42 -08:00 committed by GitHub
commit 33a0afafe7
21 changed files with 278 additions and 254 deletions
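Most of the lint failures fixed here come from dot imports: importing "github.com/onsi/ginkgo" and "github.com/onsi/gomega" with a leading dot puts By, Expect, Fail, and the matchers into the package namespace, which golint rejects. The diffs below switch every file to named imports and qualify each call site (By becomes ginkgo.By, Expect and HaveOccurred become gomega.Expect and gomega.HaveOccurred, and function values such as Fail passed to testfiles.ReadOrDie become ginkgo.Fail). The first hunk drops the test/e2e/upgrades packages from what appears to be the golint failures exclusion list; the remaining files make those packages lint-clean. A minimal before/after sketch of the import pattern, using a hypothetical helper rather than code from this change:

package upgrades

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// exampleStep is a hypothetical helper, not part of this change; it only
// illustrates how call sites read once the dot imports are gone.
//
// With the old dot imports the same body read:
//   By("checking the pod")
//   Expect(err).NotTo(HaveOccurred())
func exampleStep(err error) {
	ginkgo.By("checking the pod")                   // By                   -> ginkgo.By
	gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Expect, HaveOccurred -> gomega.*
}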


@ -673,9 +673,6 @@ test/e2e/storage/testsuites
test/e2e/storage/utils test/e2e/storage/utils
test/e2e/storage/vsphere test/e2e/storage/vsphere
test/e2e/ui test/e2e/ui
test/e2e/upgrades
test/e2e/upgrades/apps
test/e2e/upgrades/storage
test/e2e/windows test/e2e/windows
test/e2e_kubeadm test/e2e_kubeadm
test/e2e_node test/e2e_node


@ -66,7 +66,7 @@ var gpuUpgradeTests = []upgrades.Test{
} }
var statefulsetUpgradeTests = []upgrades.Test{ var statefulsetUpgradeTests = []upgrades.Test{
&upgrades.MySqlUpgradeTest{}, &upgrades.MySQLUpgradeTest{},
&upgrades.EtcdUpgradeTest{}, &upgrades.EtcdUpgradeTest{},
&upgrades.CassandraUpgradeTest{}, &upgrades.CassandraUpgradeTest{},
} }
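The MySqlUpgradeTest to MySQLUpgradeTest rename above (and the Ip to IP field rename in the ingress upgrade test further down) follows golint's initialism rule: abbreviations such as MySQL, IP, HTTP, and ID keep a uniform case inside identifiers. A small sketch of the rule with hypothetical names:

package upgrades

// upgradeTargets is a hypothetical struct, used only to illustrate the
// initialism rule; golint flags fields spelled Ip, HttpsProxy, or MySqlHost.
type upgradeTargets struct {
	IP         string // not Ip
	HTTPSProxy string // not HttpsProxy
	MySQLHost  string // not MySqlHost
}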


@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct" "github.com/onsi/gomega/gstruct"
) )
@ -32,8 +32,10 @@ type AppArmorUpgradeTest struct {
pod *api.Pod pod *api.Pod
} }
// Name returns the tracking name of the test.
func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" } func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
// Skip returns true when this test can be skipped.
func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool { func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
supportedImages := make(map[string]bool) supportedImages := make(map[string]bool)
for _, d := range common.AppArmorDistros { for _, d := range common.AppArmorDistros {
@ -50,11 +52,11 @@ func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
// Setup creates a secret and then verifies that a pod can consume it. // Setup creates a secret and then verifies that a pod can consume it.
func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) { func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
By("Loading AppArmor profiles to nodes") ginkgo.By("Loading AppArmor profiles to nodes")
common.LoadAppArmorProfiles(f) common.LoadAppArmorProfiles(f)
// Create the initial test pod. // Create the initial test pod.
By("Creating a long-running AppArmor enabled pod.") ginkgo.By("Creating a long-running AppArmor enabled pod.")
t.pod = common.CreateAppArmorTestPod(f, false, false) t.pod = common.CreateAppArmorTestPod(f, false, false)
// Verify initial state. // Verify initial state.
@ -76,32 +78,32 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) { func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything // rely on the namespace deletion to clean up everything
By("Logging container failures") ginkgo.By("Logging container failures")
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
} }
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
By("Verifying an AppArmor profile is continuously enforced for a pod") ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{}) pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod") framework.ExpectNoError(err, "Should be able to get pod")
Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running") gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running") gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted") gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
} }
func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
By("Verifying an AppArmor profile is enforced for a new pod") ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
common.CreateAppArmorTestPod(f, false, true) common.CreateAppArmorTestPod(f, false, true)
} }
func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
By("Verifying nodes are AppArmor enabled") ginkgo.By("Verifying nodes are AppArmor enabled")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list nodes") framework.ExpectNoError(err, "Failed to list nodes")
for _, node := range nodes.Items { for _, node := range nodes.Items {
Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{ gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
"Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ "Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
"Message": ContainSubstring("AppArmor enabled"), "Message": gomega.ContainSubstring("AppArmor enabled"),
}), }),
})) }))
} }
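Besides the import change, the hunks above add doc comments such as "Name returns the tracking name of the test." and "Skip returns true when this test can be skipped." golint requires a comment on every exported identifier, and the comment has to begin with the identifier's name. A minimal sketch with a hypothetical type:

package upgrades

// ExampleUpgradeTest is a hypothetical type, shown only to illustrate the
// doc-comment rule golint enforces for exported identifiers.
type ExampleUpgradeTest struct{}

// Name returns the tracking name of the test.
// Without this comment golint reports something like:
//   exported method ExampleUpgradeTest.Name should have comment or be unexported
func (ExampleUpgradeTest) Name() string { return "example-upgrade" }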


@ -17,7 +17,7 @@ limitations under the License.
package upgrades package upgrades
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1" apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -35,6 +35,7 @@ type DaemonSetUpgradeTest struct {
daemonSet *apps.DaemonSet daemonSet *apps.DaemonSet
} }
// Name returns the tracking name of the test.
func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" } func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
// Setup creates a DaemonSet and verifies that it's running // Setup creates a DaemonSet and verifies that it's running
@ -74,29 +75,29 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
}, },
} }
By("Creating a DaemonSet") ginkgo.By("Creating a DaemonSet")
var err error var err error
if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil { if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
} }
By("Waiting for DaemonSet pods to become ready") ginkgo.By("Waiting for DaemonSet pods to become ready")
err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels) return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Validating the DaemonSet after creation") ginkgo.By("Validating the DaemonSet after creation")
t.validateRunningDaemonSet(f) t.validateRunningDaemonSet(f)
} }
// Test waits until the upgrade has completed and then verifies that the DaemonSet // Test waits until the upgrade has completed and then verifies that the DaemonSet
// is still running // is still running
func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for upgradet to complete before re-validating DaemonSet") ginkgo.By("Waiting for upgradet to complete before re-validating DaemonSet")
<-done <-done
By("validating the DaemonSet is still running after upgrade") ginkgo.By("validating the DaemonSet is still running after upgrade")
t.validateRunningDaemonSet(f) t.validateRunningDaemonSet(f)
} }
@ -106,7 +107,7 @@ func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
} }
func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) { func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
By("confirming the DaemonSet pods are running on all expected nodes") ginkgo.By("confirming the DaemonSet pods are running on all expected nodes")
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels) res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if !res { if !res {
@ -114,7 +115,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
} }
// DaemonSet resource itself should be good // DaemonSet resource itself should be good
By("confirming the DaemonSet resource is in a good state") ginkgo.By("confirming the DaemonSet resource is in a good state")
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name) res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if !res { if !res {


@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -46,6 +46,7 @@ type DeploymentUpgradeTest struct {
newRSUID types.UID newRSUID types.UID
} }
// Name returns the tracking name of the test.
func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" } func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
// Setup creates a deployment and makes sure it has a new and an old replicaset running. // Setup creates a deployment and makes sure it has a new and an old replicaset running.
@ -57,15 +58,15 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
deploymentClient := c.AppsV1().Deployments(ns) deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns) rsClient := c.AppsV1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType) d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d) deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment)) framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName)) ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err) framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -76,20 +77,20 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
} }
t.oldRSUID = rss[0].UID t.oldRSUID = rss[0].UID
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1")) framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history. // Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName)) ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name" update.Spec.Template.Spec.Containers[0].Name = "updated-name"
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment)) framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName)) ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err) framework.ExpectNoError(err)
rss = rsList.Items rss = rsList.Items
@ -97,7 +98,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss))) framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
} }
By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName)) ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID { switch t.oldRSUID {
case rss[0].UID: case rss[0].UID:
t.newRSUID = rss[1].UID t.newRSUID = rss[1].UID
@ -107,7 +108,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID)) framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
} }
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2")) framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID t.oldDeploymentUID = deployment.UID
@ -116,7 +117,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Test checks whether the replicasets for a deployment are the same after an upgrade. // Test checks whether the replicasets for a deployment are the same after an upgrade.
func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
// Block until upgrade is done // Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
<-done <-done
c := f.ClientSet c := f.ClientSet
@ -127,10 +128,10 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{}) deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName)) ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
Expect(deployment.UID).To(Equal(t.oldDeploymentUID)) gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName)) ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err) framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -142,27 +143,27 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
switch t.oldRSUID { switch t.oldRSUID {
case rss[0].UID: case rss[0].UID:
Expect(rss[1].UID).To(Equal(t.newRSUID)) gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
case rss[1].UID: case rss[1].UID:
Expect(rss[0].UID).To(Equal(t.newRSUID)) gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
default: default:
framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName)) framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
} }
By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName)) ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2")) gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment)) framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1 // Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName)) ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) { _, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1 *deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment)) framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
} }


@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
// JobUpgradeTest is a test harness for batch Jobs. // JobUpgradeTest is a test harness for batch Jobs.
@ -32,30 +32,31 @@ type JobUpgradeTest struct {
namespace string namespace string
} }
// Name returns the tracking name of the test.
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" } func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
// Setup starts a Job with a parallelism of 2 and 2 completions running. // Setup starts a Job with a parallelism of 2 and 2 completions running.
func (t *JobUpgradeTest) Setup(f *framework.Framework) { func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.namespace = f.Namespace.Name t.namespace = f.Namespace.Name
By("Creating a job") ginkgo.By("Creating a job")
t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6) t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job) job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job t.job = job
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Ensuring active pods == parallelism") ginkgo.By("Ensuring active pods == parallelism")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2) err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
// Test verifies that the Job's Pods are running after an upgrade // Test verifies that the Job's Pods are running after an upgrade
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done <-done
By("Ensuring active pods == parallelism") ginkgo.By("Ensuring active pods == parallelism")
running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2) running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(running).To(BeTrue()) gomega.Expect(running).To(gomega.BeTrue())
} }
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.


@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -45,19 +45,21 @@ type ReplicaSetUpgradeTest struct {
UID types.UID UID types.UID
} }
// Name returns the tracking name of the test.
func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" } func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
// Setup creates a ReplicaSet and makes sure its replicas are ready.
func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet c := f.ClientSet
ns := f.Namespace.Name ns := f.Namespace.Name
nginxImage := imageutils.GetE2EImage(imageutils.Nginx) nginxImage := imageutils.GetE2EImage(imageutils.Nginx)
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns)) ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage) replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet) rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName)) framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID r.UID = rs.UID
@ -70,28 +72,28 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
rsClient := c.AppsV1().ReplicaSets(ns) rsClient := c.AppsV1().ReplicaSets(ns)
// Block until upgrade is done // Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName)) ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
<-done <-done
// Verify the RS is the same (survives) after the upgrade // Verify the RS is the same (survives) after the upgrade
By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName)) ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{}) upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
if upgradedRS.UID != r.UID { if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID)) framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
} }
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName)) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName)) framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready // Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum)) ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) { _, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
*rs.Spec.Replicas = scaleNum *rs.Spec.Replicas = scaleNum
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName)) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName)) framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
} }


@ -17,8 +17,8 @@ limitations under the License.
package upgrades package upgrades
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1" apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
@ -35,8 +35,10 @@ type StatefulSetUpgradeTest struct {
set *apps.StatefulSet set *apps.StatefulSet
} }
// Name returns the tracking name of the test.
func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" } func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" }
// Skip returns true when this test can be skipped.
func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0") minVersion := version.MustParseSemantic("1.5.0")
@ -65,50 +67,50 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
t.tester = framework.NewStatefulSetTester(f.ClientSet) t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set) t.tester.PauseNewPods(t.set)
By("Creating service " + headlessSvcName + " in namespace " + ns) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service) _, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Creating statefulset " + ssName + " in namespace " + ns) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3 *(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set) _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Saturating stateful set " + t.set.Name) ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set) t.tester.Saturate(t.set)
t.verify() t.verify()
t.restart() t.restart()
t.verify() t.verify()
} }
// Waits for the upgrade to complete and verifies the StatefulSet basic functionality // Test waits for the upgrade to complete and verifies the StatefulSet basic functionality
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done <-done
t.verify() t.verify()
} }
// Deletes all StatefulSets // Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) { func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name) framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
} }
func (t *StatefulSetUpgradeTest) verify() { func (t *StatefulSetUpgradeTest) verify() {
By("Verifying statefulset mounted data directory is usable") ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data")) framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
By("Verifying statefulset provides a stable hostname for each pod") ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set)) framework.ExpectNoError(t.tester.CheckHostname(t.set))
By("Verifying statefulset set proper service name") ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName)) framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;" cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all stateful pods") ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd)) framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
} }
func (t *StatefulSetUpgradeTest) restart() { func (t *StatefulSetUpgradeTest) restart() {
By("Restarting statefulset " + t.set.Name) ginkgo.By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set) t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set) t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
} }


@ -25,8 +25,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func cassandraKubectlCreate(ns, file string) { func cassandraKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
@ -75,16 +75,16 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB") ginkgo.By("Creating a PDB")
cassandraKubectlCreate(ns, "pdb.yaml") cassandraKubectlCreate(ns, "pdb.yaml")
By("Creating a Cassandra StatefulSet") ginkgo.By("Creating a Cassandra StatefulSet")
t.ssTester.CreateStatefulSet(cassandraManifestPath, ns) t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
By("Creating a cassandra-test-server deployment") ginkgo.By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml") cassandraKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services") ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -95,18 +95,18 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 dummy users") ginkgo.By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2 t.successfulWrites = 2
By("Verifying that the users exist") ginkgo.By("Verifying that the users exist")
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users)).To(Equal(2)) gomega.Expect(len(users)).To(gomega.Equal(2))
} }
// listUsers gets a list of users from the db via the tester service. // listUsers gets a list of users from the db via the tester service.
@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
// getServiceIP is a helper method to extract the Ingress IP from the service. // getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -165,7 +165,7 @@ func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName
// ratio is over a certain threshold (0.75). We also verify that we get // ratio is over a certain threshold (0.75). We also verify that we get
// at least the same number of rows back as we successfully wrote. // at least the same number of rows back as we successfully wrote.
func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
var ( var (
success, failures, writeAttempts, lastUserCount int success, failures, writeAttempts, lastUserCount int
mu sync.Mutex mu sync.Mutex
@ -199,19 +199,19 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
}, 10*time.Millisecond, done) }, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites) framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue()) gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures) ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio) framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts) ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio) framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors) framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs. // TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue()) gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
} }
// Teardown does one final check of the data's availability. // Teardown does one final check of the data's availability.
func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) { func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue()) gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
} }


@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
) )
@ -34,6 +34,7 @@ type ConfigMapUpgradeTest struct {
configMap *v1.ConfigMap configMap *v1.ConfigMap
} }
// Name returns the tracking name of the test.
func (ConfigMapUpgradeTest) Name() string { func (ConfigMapUpgradeTest) Name() string {
return "[sig-storage] [sig-api-machinery] configmap-upgrade" return "[sig-storage] [sig-api-machinery] configmap-upgrade"
} }
@ -54,13 +55,13 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
}, },
} }
By("Creating a ConfigMap") ginkgo.By("Creating a ConfigMap")
var err error var err error
if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil { if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
} }
By("Making sure the ConfigMap is consumable") ginkgo.By("Making sure the ConfigMap is consumable")
t.testPod(f) t.testPod(f)
} }
@ -68,7 +69,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the ConfigMap. // pod can still consume the ConfigMap.
func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Consuming the ConfigMap after upgrade") ginkgo.By("Consuming the ConfigMap after upgrade")
t.testPod(f) t.testPod(f)
} }


@ -25,8 +25,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -37,14 +37,17 @@ import (
const manifestPath = "test/e2e/testing-manifests/statefulset/etcd" const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
// EtcdUpgradeTest tests that etcd is writable before and after a cluster upgrade.
type EtcdUpgradeTest struct { type EtcdUpgradeTest struct {
ip string ip string
successfulWrites int successfulWrites int
ssTester *framework.StatefulSetTester ssTester *framework.StatefulSetTester
} }
// Name returns the tracking name of the test.
func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" } func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" }
// Skip returns true when this test can be skipped.
func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool { func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.6.0") minVersion := version.MustParseSemantic("1.6.0")
for _, vCtx := range upgCtx.Versions { for _, vCtx := range upgCtx.Versions {
@ -56,26 +59,27 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func kubectlCreate(ns, file string) { func kubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
// Setup creates etcd statefulset and then verifies that the etcd is writable.
func (t *EtcdUpgradeTest) Setup(f *framework.Framework) { func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB") ginkgo.By("Creating a PDB")
kubectlCreate(ns, "pdb.yaml") kubectlCreate(ns, "pdb.yaml")
By("Creating an etcd StatefulSet") ginkgo.By("Creating an etcd StatefulSet")
t.ssTester.CreateStatefulSet(manifestPath, ns) t.ssTester.CreateStatefulSet(manifestPath, ns)
By("Creating an etcd--test-server deployment") ginkgo.By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml") kubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services") ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -86,18 +90,18 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 dummy users") ginkgo.By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred()) gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2 t.successfulWrites = 2
By("Verifying that the users exist") ginkgo.By("Verifying that the users exist")
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users)).To(Equal(2)) gomega.Expect(len(users)).To(gomega.Equal(2))
} }
func (t *EtcdUpgradeTest) listUsers() ([]string, error) { func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@ -139,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -147,8 +151,9 @@ func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName strin
return ingress[0].IP return ingress[0].IP
} }
// Test waits for upgrade to complete and verifies if etcd is writable.
func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
var ( var (
success, failures, writeAttempts, lastUserCount int success, failures, writeAttempts, lastUserCount int
mu sync.Mutex mu sync.Mutex
@ -182,19 +187,19 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
}, 10*time.Millisecond, done) }, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites) framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue()) gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures) ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio) framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts) ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio) framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors) framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs. // TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue()) gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
} }
// Teardown does one final check of the data's availability. // Teardown does one final check of the data's availability.
func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) { func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers() users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue()) gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
} }


@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade. // HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade.
@ -33,9 +33,10 @@ type HPAUpgradeTest struct {
hpa *autoscalingv1.HorizontalPodAutoscaler hpa *autoscalingv1.HorizontalPodAutoscaler
} }
// Name returns the tracking name of the test.
func (HPAUpgradeTest) Name() string { return "hpa-upgrade" } func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
// Creates a resource consumer and an HPA object that autoscales the consumer. // Setup creates a resource consumer and an HPA object that autoscales the consumer.
func (t *HPAUpgradeTest) Setup(f *framework.Framework) { func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
t.rc = common.NewDynamicResourceConsumer( t.rc = common.NewDynamicResourceConsumer(
"res-cons-upgrade", "res-cons-upgrade",
@ -63,7 +64,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
// Test waits for upgrade to complete and verifies if HPA works correctly. // Test waits for upgrade to complete and verifies if HPA works correctly.
func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
// Block until upgrade is done // Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA")) ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
<-done <-done
t.test() t.test()
} }
@ -79,19 +80,19 @@ func (t *HPAUpgradeTest) test() {
const timeToWait = 15 * time.Minute const timeToWait = 15 * time.Minute
t.rc.Resume() t.rc.Resume()
By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1.")) ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
t.rc.ConsumeCPU(10) /* millicores */ t.rc.ConsumeCPU(10) /* millicores */
By(fmt.Sprintf("HPA waits for 1 replica")) ginkgo.By(fmt.Sprintf("HPA waits for 1 replica"))
t.rc.WaitForReplicas(1, timeToWait) t.rc.WaitForReplicas(1, timeToWait)
By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores.")) ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
t.rc.ConsumeCPU(250) /* millicores */ t.rc.ConsumeCPU(250) /* millicores */
By(fmt.Sprintf("HPA waits for 3 replicas")) ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas"))
t.rc.WaitForReplicas(3, timeToWait) t.rc.WaitForReplicas(3, timeToWait)
By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5.")) ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
t.rc.ConsumeCPU(700) /* millicores */ t.rc.ConsumeCPU(700) /* millicores */
By(fmt.Sprintf("HPA waits for 5 replicas")) ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas"))
t.rc.WaitForReplicas(5, timeToWait) t.rc.WaitForReplicas(5, timeToWait)
// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail. // We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.


@ -24,7 +24,7 @@ import (
"reflect" "reflect"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
compute "google.golang.org/api/compute/v1" compute "google.golang.org/api/compute/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -61,10 +61,11 @@ type GCPResourceStore struct {
TpsList []*compute.TargetHttpsProxy TpsList []*compute.TargetHttpsProxy
SslList []*compute.SslCertificate SslList []*compute.SslCertificate
BeList []*compute.BackendService BeList []*compute.BackendService
Ip *compute.Address IP *compute.Address
IgList []*compute.InstanceGroup IgList []*compute.InstanceGroup
} }
// Name returns the tracking name of the test.
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" } func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
// Setup creates a GLBC, allocates an ip, and an ingress resource, // Setup creates a GLBC, allocates an ip, and an ingress resource,
@ -96,17 +97,17 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
t.ip = t.gceController.CreateStaticIP(t.ipName) t.ip = t.gceController.CreateStaticIP(t.ipName)
// Create a working basic Ingress // Create a working basic Ingress
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip)) ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{ jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
ingress.IngressStaticIPKey: t.ipName, ingress.IngressStaticIPKey: t.ipName,
ingress.IngressAllowHTTPKey: "false", ingress.IngressAllowHTTPKey: "false",
}, map[string]string{}) }, map[string]string{})
t.jig.SetHTTPS("tls-secret", "ingress.test.com") t.jig.SetHTTPS("tls-secret", "ingress.test.com")
By("waiting for Ingress to come up with ip: " + t.ip) ginkgo.By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
By("keeping track of GCP resources created by Ingress") ginkgo.By("keeping track of GCP resources created by Ingress")
t.resourceStore = &GCPResourceStore{} t.resourceStore = &GCPResourceStore{}
t.populateGCPResourceStore(t.resourceStore) t.populateGCPResourceStore(t.resourceStore)
} }
@ -134,18 +135,18 @@ func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) { func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(t.gceController.Ns) framework.DescribeIng(t.gceController.Ns)
} }
if t.jig.Ingress != nil { if t.jig.Ingress != nil {
By("Deleting ingress") ginkgo.By("Deleting ingress")
t.jig.TryDeleteIngress() t.jig.TryDeleteIngress()
} else { } else {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
} }
By("Cleaning up cloud resources") ginkgo.By("Cleaning up cloud resources")
framework.ExpectNoError(t.gceController.CleanupGCEIngressController()) framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
} }
@ -171,20 +172,20 @@ func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) { func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption { if testDuringDisruption {
By("continuously hitting the Ingress IP") ginkgo.By("continuously hitting the Ingress IP")
wait.Until(func() { wait.Until(func() {
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}, t.jig.PollInterval, done) }, t.jig.PollInterval, done)
} else { } else {
By("waiting for upgrade to finish without checking if Ingress remains up") ginkgo.By("waiting for upgrade to finish without checking if Ingress remains up")
<-done <-done
} }
By("hitting the Ingress IP " + t.ip) ginkgo.By("hitting the Ingress IP " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
// We want to manually trigger a sync because then we can easily verify // We want to manually trigger a sync because then we can easily verify
// a correct sync completed after update. // a correct sync completed after update.
By("updating ingress spec to manually trigger a sync") ginkgo.By("updating ingress spec to manually trigger a sync")
t.jig.Update(func(ing *extensions.Ingress) { t.jig.Update(func(ing *extensions.Ingress) {
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append( ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths, ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
@ -197,7 +198,7 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}
// WaitForIngress() tests that all paths are pinged, which is how we know // WaitForIngress() tests that all paths are pinged, which is how we know
// everything is synced with the cloud. // everything is synced with the cloud.
t.jig.WaitForIngress(false) t.jig.WaitForIngress(false)
By("comparing GCP resources post-upgrade") ginkgo.By("comparing GCP resources post-upgrade")
postUpgradeResourceStore := &GCPResourceStore{} postUpgradeResourceStore := &GCPResourceStore{}
t.populateGCPResourceStore(postUpgradeResourceStore) t.populateGCPResourceStore(postUpgradeResourceStore)
@ -238,7 +239,7 @@ func (t *IngressUpgradeTest) populateGCPResourceStore(resourceStore *GCPResource
resourceStore.TpsList = cont.ListTargetHttpsProxies() resourceStore.TpsList = cont.ListTargetHttpsProxies()
resourceStore.SslList = cont.ListSslCertificates() resourceStore.SslList = cont.ListSslCertificates()
resourceStore.BeList = cont.ListGlobalBackendServices() resourceStore.BeList = cont.ListGlobalBackendServices()
resourceStore.Ip = cont.GetGlobalAddress(t.ipName) resourceStore.IP = cont.GetGlobalAddress(t.ipName)
resourceStore.IgList = cont.ListInstanceGroups() resourceStore.IgList = cont.ListInstanceGroups()
} }
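
The resourceStore.Ip to resourceStore.IP change above is golint's initialism rule at work: well-known abbreviations such as IP, ID, and SQL keep a uniform case in exported Go identifiers. A minimal, self-contained sketch of the rule, using illustrative names rather than the real GCPResourceStore definition:

package main

import "fmt"

// resourceStore is a toy stand-in for the store the ingress test populates.
type resourceStore struct {
	// golint would flag the old spelling: "struct field Ip should be IP".
	IP     string
	IgList []string
}

func main() {
	rs := resourceStore{IP: "203.0.113.10", IgList: []string{"ig-1"}}
	fmt.Println(rs.IP, rs.IgList)
}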

View File

@ -28,8 +28,8 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -43,12 +43,13 @@ const (
type KubeProxyUpgradeTest struct { type KubeProxyUpgradeTest struct {
} }
// Name returns the tracking name of the test.
func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" } func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
// Setup verifies kube-proxy static pods are running before upgrade. // Setup verifies kube-proxy static pods are running before upgrade.
func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) { func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy static pods running and ready") ginkgo.By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
} }
// Test validates if kube-proxy is migrated from static pods to DaemonSet. // Test validates if kube-proxy is migrated from static pods to DaemonSet.
@ -56,14 +57,14 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
c := f.ClientSet c := f.ClientSet
// Block until upgrade is done. // Block until upgrade is done.
By("Waiting for upgrade to finish") ginkgo.By("Waiting for upgrade to finish")
<-done <-done
By("Waiting for kube-proxy static pods disappear") ginkgo.By("Waiting for kube-proxy static pods disappear")
Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy DaemonSet running and ready") ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(gomega.HaveOccurred())
} }
// Teardown does nothing. // Teardown does nothing.
@ -74,12 +75,13 @@ func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
type KubeProxyDowngradeTest struct { type KubeProxyDowngradeTest struct {
} }
// Name returns the tracking name of the test.
func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" } func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
// Setup verifies kube-proxy DaemonSet is running before upgrade. // Setup verifies kube-proxy DaemonSet is running before upgrade.
func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) { func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy DaemonSet running and ready") ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
} }
// Test validates if kube-proxy is migrated from DaemonSet to static pods. // Test validates if kube-proxy is migrated from DaemonSet to static pods.
@ -87,14 +89,14 @@ func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct
c := f.ClientSet c := f.ClientSet
// Block until upgrade is done. // Block until upgrade is done.
By("Waiting for upgrade to finish") ginkgo.By("Waiting for upgrade to finish")
<-done <-done
By("Waiting for kube-proxy DaemonSet disappear") ginkgo.By("Waiting for kube-proxy DaemonSet disappear")
Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy static pods running and ready") ginkgo.By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred()) gomega.Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(gomega.HaveOccurred())
} }
// Teardown does nothing. // Teardown does nothing.
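
The import change at the top of this file drops the dot-imports of ginkgo and gomega, which golint discourages, so every call site now names its package. A hedged sketch of the resulting call style; waitForProxyReady is a hypothetical stand-in for the real polling helpers:

package kubeproxyexample

import (
	"errors"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// waitForProxyReady is a hypothetical helper used only for illustration.
func waitForProxyReady() error { return errors.New("kube-proxy not ready") }

// verifyProxyReady shows the qualified ginkgo.By and gomega.Expect calls that
// replace the bare By and Expect the old dot-imports made available.
func verifyProxyReady() {
	ginkgo.By("Waiting for kube-proxy static pods to be running and ready")
	gomega.Expect(waitForProxyReady()).NotTo(gomega.HaveOccurred())
}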

View File

@ -25,8 +25,8 @@ import (
"strconv" "strconv"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
@ -37,17 +37,19 @@ import (
const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade" const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
// MySqlUpgradeTest implements an upgrade test harness that polls a replicated sql database. // MySQLUpgradeTest implements an upgrade test harness that polls a replicated sql database.
type MySqlUpgradeTest struct { type MySQLUpgradeTest struct {
ip string ip string
successfulWrites int successfulWrites int
nextWrite int nextWrite int
ssTester *framework.StatefulSetTester ssTester *framework.StatefulSetTester
} }
func (MySqlUpgradeTest) Name() string { return "mysql-upgrade" } // Name returns the tracking name of the test.
func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }
func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool { // Skip returns true when this test can be skipped.
func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0") minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions { for _, vCtx := range upgCtx.Versions {
@ -59,13 +61,13 @@ func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
} }
func mysqlKubectlCreate(ns, file string) { func mysqlKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), Fail)) input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
} }
func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 { if len(ingress) == 0 {
return "" return ""
@ -77,22 +79,22 @@ func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName stri
// from the db. It then connects to the db with the write Service and populates the db with a table // from the db. It then connects to the db with the write Service and populates the db with a table
// and a few entries. Finally, it connects to the db with the read Service, and confirms the data is // and a few entries. Finally, it connects to the db with the read Service, and confirms the data is
// available. The db connections are left open to be used later in the test. // available. The db connections are left open to be used later in the test.
func (t *MySqlUpgradeTest) Setup(f *framework.Framework) { func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet) t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a configmap") ginkgo.By("Creating a configmap")
mysqlKubectlCreate(ns, "configmap.yaml") mysqlKubectlCreate(ns, "configmap.yaml")
By("Creating a mysql StatefulSet") ginkgo.By("Creating a mysql StatefulSet")
t.ssTester.CreateStatefulSet(mysqlManifestPath, ns) t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
By("Creating a mysql-test-server deployment") ginkgo.By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml") mysqlKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the test-service") ginkgo.By("Getting the ingress IPs from the test-service")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil return false, nil
@ -103,24 +105,24 @@ func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up") framework.Logf("Service endpoint is up")
By("Adding 2 names to the database") ginkgo.By("Adding 2 names to the database")
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred()) gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred()) gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
By("Verifying that the 2 names have been inserted") ginkgo.By("Verifying that the 2 names have been inserted")
count, err := t.countNames() count, err := t.countNames()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(count).To(Equal(2)) gomega.Expect(count).To(gomega.Equal(2))
} }
// Test continually polls the db using the read and write connections, inserting data, and checking // Test continually polls the db using the read and write connections, inserting data, and checking
// that all the data is readable. // that all the data is readable.
func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
var writeSuccess, readSuccess, writeFailure, readFailure int var writeSuccess, readSuccess, writeFailure, readFailure int
By("Continuously polling the database during upgrade.") ginkgo.By("Continuously polling the database during upgrade.")
go wait.Until(func() { go wait.Until(func() {
_, err := t.countNames() _, err := t.countNames()
if err != nil { if err != nil {
@ -162,14 +164,14 @@ func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
} }
// Teardown performs one final check of the data's availability. // Teardown performs one final check of the data's availability.
func (t *MySqlUpgradeTest) Teardown(f *framework.Framework) { func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
count, err := t.countNames() count, err := t.countNames()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(count >= t.successfulWrites).To(BeTrue()) gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
} }
// addName adds a new value to the db. // addName adds a new value to the db.
func (t *MySqlUpgradeTest) addName(name string) error { func (t *MySQLUpgradeTest) addName(name string) error {
val := map[string][]string{"name": {name}} val := map[string][]string{"name": {name}}
t.nextWrite++ t.nextWrite++
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val) r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val)
@ -189,7 +191,7 @@ func (t *MySqlUpgradeTest) addName(name string) error {
// countNames checks to make sure the values in testing.users are available, and returns // countNames checks to make sure the values in testing.users are available, and returns
// the count of them. // the count of them.
func (t *MySqlUpgradeTest) countNames() (int, error) { func (t *MySQLUpgradeTest) countNames() (int, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip)) r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip))
if err != nil { if err != nil {
return 0, err return 0, err
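
Besides renaming MySqlUpgradeTest to MySQLUpgradeTest (SQL is on golint's initialism list), the diff gives every exported method a comment that begins with the method's own name, the form golint expects. A trimmed, illustrative sketch of that shape, not the real upgrades.Test interface:

package example

// Test is a trimmed, illustrative subset of the upgrade test harness interface.
type Test interface {
	Name() string
	Skip() bool
}

// MySQLUpgradeTest polls a replicated MySQL database across an upgrade.
type MySQLUpgradeTest struct{}

// Name returns the tracking name of the test.
func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }

// Skip returns true when this test can be skipped.
func (MySQLUpgradeTest) Skip() bool { return false }

// Compile-time check that the sketch satisfies the trimmed interface.
var _ Test = MySQLUpgradeTest{}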

View File

@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/scheduling" "k8s.io/kubernetes/test/e2e/scheduling"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
// NvidiaGPUUpgradeTest tests that gpu resource is available before and after // NvidiaGPUUpgradeTest tests that gpu resource is available before and after
@ -35,12 +35,13 @@ import (
type NvidiaGPUUpgradeTest struct { type NvidiaGPUUpgradeTest struct {
} }
// Name returns the tracking name of the test.
func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" } func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" }
// Setup creates a job requesting gpu. // Setup creates a job requesting gpu.
func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) { func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false) scheduling.SetupNVIDIAGPUNode(f, false)
By("Creating a job requesting gpu") ginkgo.By("Creating a job requesting gpu")
t.startJob(f) t.startJob(f)
} }
@ -48,13 +49,13 @@ func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
// cuda pod started by the gpu job can successfully finish. // cuda pod started by the gpu job can successfully finish.
func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Verifying gpu job success") ginkgo.By("Verifying gpu job success")
t.verifyJobPodSuccess(f) t.verifyJobPodSuccess(f)
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade { if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
// MasterUpgrade should be totally hitless. // MasterUpgrade should be totally hitless.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add") job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(job.Status.Failed).To(BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed) gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
} }
} }
@ -85,12 +86,12 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
} }
ns := f.Namespace.Name ns := f.Namespace.Name
_, err := framework.CreateJob(f.ClientSet, ns, testJob) _, err := framework.CreateJob(f.ClientSet, ns, testJob)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Created job %v", testJob) framework.Logf("Created job %v", testJob)
By("Waiting for gpu job pod start") ginkgo.By("Waiting for gpu job pod start")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1) err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Done with gpu job pod start") ginkgo.By("Done with gpu job pod start")
} }
// verifyJobPodSuccess verifies that the started cuda pod successfully passes. // verifyJobPodSuccess verifies that the started cuda pod successfully passes.
@ -98,9 +99,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
// Wait for client pod to complete. // Wait for client pod to complete.
ns := f.Namespace.Name ns := f.Namespace.Name
err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1) err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add") pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
createdPod := pods.Items[0].Name createdPod := pods.Items[0].Name
framework.Logf("Created pod %v", createdPod) framework.Logf("Created pod %v", createdPod)
f.PodClient().WaitForSuccess(createdPod, 5*time.Minute) f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
@ -108,5 +109,5 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
framework.ExpectNoError(err, "Should be able to get pod logs") framework.ExpectNoError(err, "Should be able to get pod logs")
framework.Logf("Got pod logs: %v", logs) framework.Logf("Got pod logs: %v", logs)
regex := regexp.MustCompile("PASSED") regex := regexp.MustCompile("PASSED")
Expect(regex.MatchString(logs)).To(BeTrue()) gomega.Expect(regex.MatchString(logs)).To(gomega.BeTrue())
} }
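
The BeZero assertion above also carries an annotation that is printed only when the expectation fails. A small sketch of gomega's optional-description arguments in a plain Go test; the value is illustrative, while the real test reads job.Status.Failed:

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestJobHadNoFailures demonstrates the optional failure annotation: the extra
// arguments after the matcher are a format string plus values, shown only if
// the assertion fails.
func TestJobHadNoFailures(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	failedPods := 0 // illustrative stand-in for job.Status.Failed
	g.Expect(failedPods).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", failedPods)
}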

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// SecretUpgradeTest tests that a secret is available before and after // SecretUpgradeTest tests that a secret is available before and after
@ -34,6 +34,7 @@ type SecretUpgradeTest struct {
secret *v1.Secret secret *v1.Secret
} }
// Name returns the tracking name of the test.
func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" } func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" }
// Setup creates a secret and then verifies that a pod can consume it. // Setup creates a secret and then verifies that a pod can consume it.
@ -52,13 +53,13 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
}, },
} }
By("Creating a secret") ginkgo.By("Creating a secret")
var err error var err error
if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil { if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
} }
By("Making sure the secret is consumable") ginkgo.By("Making sure the secret is consumable")
t.testPod(f) t.testPod(f)
} }
@ -66,7 +67,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the secret. // pod can still consume the secret.
func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) { func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done <-done
By("Consuming the secret after upgrade") ginkgo.By("Consuming the secret after upgrade")
t.testPod(f) t.testPod(f)
} }
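
Setup here builds the secret in code and creates it through the typed clientset. A hedged sketch of that step with the era-appropriate client-go call (no context argument, matching the Create call in the diff); the name and data are illustrative:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createUpgradeSecret sketches the Setup step: build a small secret and create
// it in the test namespace through the typed client.
func createUpgradeSecret(cs kubernetes.Interface, ns string) (*v1.Secret, error) {
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "upgrade-secret"},
		Data:       map[string][]byte{"data": []byte("keep me across the upgrade")},
	}
	return cs.CoreV1().Secrets(ns).Create(secret)
}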

View File

@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
// ServiceUpgradeTest tests that a service is available before and // ServiceUpgradeTest tests that a service is available before and
@ -34,6 +34,7 @@ type ServiceUpgradeTest struct {
svcPort int svcPort int
} }
// Name returns the tracking name of the test.
func (ServiceUpgradeTest) Name() string { return "service-upgrade" } func (ServiceUpgradeTest) Name() string { return "service-upgrade" }
func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") } func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
@ -45,7 +46,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace ns := f.Namespace
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name) ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) { tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer s.Spec.Type = v1.ServiceTypeLoadBalancer
}) })
@ -56,16 +57,16 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port) svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName) ginkgo.By("creating pod to be part of service " + serviceName)
rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity) rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity)
if shouldTestPDBs() { if shouldTestPDBs() {
By("creating a PodDisruptionBudget to cover the ReplicationController") ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
jig.CreatePDBOrFail(ns.Name, rc) jig.CreatePDBOrFail(ns.Name, rc)
} }
// Hit it once before considering ourselves ready // Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer") ginkgo.By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig = jig t.jig = jig
@ -95,18 +96,18 @@ func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) { func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption { if testDuringDisruption {
// Continuous validation // Continuous validation
By("continuously hitting the pod through the service's LoadBalancer") ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() { wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
}, framework.Poll, done) }, framework.Poll, done)
} else { } else {
// Block until upgrade is done // Block until upgrade is done
By("waiting for upgrade to finish without checking if service remains up") ginkgo.By("waiting for upgrade to finish without checking if service remains up")
<-done <-done
} }
// Sanity check and hit it once more // Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer") ginkgo.By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer) t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
} }
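
In the disruption branch above, the test keeps probing the load balancer until the upgrade closes the done channel. A minimal sketch of that pattern with wait.Until from k8s.io/apimachinery; probeService is a hypothetical stand-in for jig.TestReachableHTTP:

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// probeService is a hypothetical probe used only for illustration.
func probeService() { fmt.Println("service still reachable") }

// pollUntilUpgradeDone runs the probe every interval and returns once the
// done channel is closed by the upgrade machinery.
func pollUntilUpgradeDone(done <-chan struct{}) {
	wait.Until(probeService, 2*time.Second, done)
}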

View File

@ -21,8 +21,8 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
) )
@ -33,6 +33,7 @@ type PersistentVolumeUpgradeTest struct {
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
} }
// Name returns the tracking name of the test.
func (PersistentVolumeUpgradeTest) Name() string { return "[sig-storage] persistent-volume-upgrade" } func (PersistentVolumeUpgradeTest) Name() string { return "[sig-storage] persistent-volume-upgrade" }
const ( const (
@ -55,7 +56,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
By("Initializing PV source") ginkgo.By("Initializing PV source")
t.pvSource, _ = framework.CreateGCEVolume() t.pvSource, _ = framework.CreateGCEVolume()
pvConfig := framework.PersistentVolumeConfig{ pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "pv-upgrade", NamePrefix: "pv-upgrade",
@ -65,12 +66,12 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
emptyStorageClass := "" emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &emptyStorageClass} pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &emptyStorageClass}
By("Creating the PV and PVC") ginkgo.By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true) t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))
By("Consuming the PV before upgrade") ginkgo.By("Consuming the PV before upgrade")
t.testPod(f, pvWriteCmd+";"+pvReadCmd) t.testPod(f, pvWriteCmd+";"+pvReadCmd)
} }
@ -78,7 +79,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
// and that the volume data persists. // and that the volume data persists.
func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done <-done
By("Consuming the PV after upgrade") ginkgo.By("Consuming the PV after upgrade")
t.testPod(f, pvReadCmd) t.testPod(f, pvReadCmd)
} }

View File

@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const devicePath = "/mnt/volume1" const devicePath = "/mnt/volume1"
@ -43,10 +43,12 @@ type VolumeModeDowngradeTest struct {
pod *v1.Pod pod *v1.Pod
} }
// Name returns the tracking name of the test.
func (VolumeModeDowngradeTest) Name() string { func (VolumeModeDowngradeTest) Name() string {
return "[sig-storage] volume-mode-downgrade" return "[sig-storage] volume-mode-downgrade"
} }
// Skip returns true when this test can be skipped.
func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") { if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
return true return true
@ -72,7 +74,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
cs := f.ClientSet cs := f.ClientSet
ns := f.Namespace.Name ns := f.Namespace.Name
By("Creating a PVC") ginkgo.By("Creating a PVC")
block := v1.PersistentVolumeBlock block := v1.PersistentVolumeBlock
pvcConfig := framework.PersistentVolumeClaimConfig{ pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: nil, StorageClassName: nil,
@ -80,46 +82,46 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
} }
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns) t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(cs, ns, t.pvc) t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{}) t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{}) t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Consuming the PVC before downgrade") ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Checking if PV exists as expected volume mode") ginkgo.By("Checking if PV exists as expected volume mode")
utils.CheckVolumeModeOfPath(t.pod, block, devicePath) utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
By("Checking if read/write to PV works properly") ginkgo.By("Checking if read/write to PV works properly")
utils.CheckReadWriteToPath(t.pod, block, devicePath) utils.CheckReadWriteToPath(t.pod, block, devicePath)
} }
// Test waits for the downgrade to complete, and then verifies that a pod can no // Test waits for the downgrade to complete, and then verifies that a pod can no
// longer consume the pv as it is neither mapped nor mounted into the pod // longer consume the pv as it is neither mapped nor mounted into the pod
func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for downgrade to finish") ginkgo.By("Waiting for downgrade to finish")
<-done <-done
By("Verifying that nothing exists at the device path in the pod") ginkgo.By("Verifying that nothing exists at the device path in the pod")
utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1) utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
} }
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) {
By("Deleting the pod") ginkgo.By("Deleting the pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod))
By("Deleting the PVC") ginkgo.By("Deleting the PVC")
framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil)) framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil))
By("Waiting for the PV to be deleted") ginkgo.By("Waiting for the PV to be deleted")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute))
} }
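
Setup above requests a raw block claim (v1.PersistentVolumeBlock) through the framework helpers before the downgrade. A hedged sketch of what such a claim looks like when built directly against the core/v1 API of this era; the name and size are illustrative:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newBlockPVC builds a claim whose VolumeMode asks for a raw block device
// instead of a mounted filesystem.
func newBlockPVC(ns string) *v1.PersistentVolumeClaim {
	block := v1.PersistentVolumeBlock
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "block-pvc-", Namespace: ns},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &block,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
}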

View File

@ -19,8 +19,8 @@ package upgrades
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
@ -32,7 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
// SecretUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During // SysctlUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
// a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is // a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is
// expected to keep failing before and after the upgrade. // expected to keep failing before and after the upgrade.
type SysctlUpgradeTest struct { type SysctlUpgradeTest struct {
@ -53,19 +53,19 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
<-done <-done
switch upgrade { switch upgrade {
case MasterUpgrade, ClusterUpgrade: case MasterUpgrade, ClusterUpgrade:
By("Checking the safe sysctl pod keeps running on master upgrade") ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(pod.Status.Phase).To(Equal(v1.PodRunning)) gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
} }
By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
if err == nil { if err == nil {
Expect(pod.Status.Phase).NotTo(Equal(v1.PodRunning)) gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
} }
t.verifySafeSysctlWork(f) t.verifySafeSysctlWork(f)
@ -78,15 +78,15 @@ func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) {
} }
func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod { func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod {
By("Creating a pod with safe sysctls") ginkgo.By("Creating a pod with safe sysctls")
safeSysctl := "net.ipv4.ip_local_port_range" safeSysctl := "net.ipv4.ip_local_port_range"
safeSysctlValue := "1024 1042" safeSysctlValue := "1024 1042"
validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue}) validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue})
validPod = f.PodClient().Create(t.validPod) validPod = f.PodClient().Create(t.validPod)
By("Making sure the valid pod launches") ginkgo.By("Making sure the valid pod launches")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod) ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason { if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12") framework.Skipf("No sysctl support in Docker <1.12")
} }
@ -96,19 +96,19 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod
} }
func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod { func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod {
By("Creating a pod with unsafe sysctls") ginkgo.By("Creating a pod with unsafe sysctls")
invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{ invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{
"fs.mount-max": "1000000", "fs.mount-max": "1000000",
}) })
invalidPod = f.PodClient().Create(invalidPod) invalidPod = f.PodClient().Create(invalidPod)
By("Making sure the invalid pod failed") ginkgo.By("Making sure the invalid pod failed")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod) ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason { if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12") framework.Skipf("No sysctl support in Docker <1.12")
} }
Expect(ev.Reason).To(Equal(sysctl.ForbiddenReason)) gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
return invalidPod return invalidPod
} }
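
The Test body above tolerates a missing invalid pod: a NotFound error from the Get is expected, while any other error should fail the test. A compact sketch of that apimachinery pattern; failOnUnexpectedError is a hypothetical helper, not part of the real test:

package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// failOnUnexpectedError mirrors the pattern above: NotFound is acceptable
// (the pod was rejected or already cleaned up), anything else is surfaced.
func failOnUnexpectedError(err error) error {
	if err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("unexpected error looking up pod: %v", err)
	}
	return nil
}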