fix golint failures for test/e2e/upgrades/apps

danielqsj 2019-02-25 13:32:15 +08:00
parent 139a13d312
commit 8916ccabaf
6 changed files with 68 additions and 62 deletions
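The changes below address the two golint findings that the dot-import style used by these e2e tests typically triggers: golint asks that packages not be dot-imported ("should not use dot imports") and that exported methods carry a doc comment starting with the method name. The following is a minimal, hypothetical sketch of the pre-fix style, not code from this commit; the package, type, and method names are invented, and the warning wording paraphrased above is approximate.

// Hypothetical pre-fix style, for illustration only; not code from this commit.
// golint would flag the dot imports and the undocumented exported method Name.
package example

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// ExampleUpgradeTest is an invented harness used only to illustrate the lint findings.
type ExampleUpgradeTest struct{}

func (ExampleUpgradeTest) Name() string { return "[sig-apps] example-upgrade" }

func (ExampleUpgradeTest) check(err error) {
    By("Checking that no error occurred") // unqualified call, resolved via the dot import
    Expect(err).NotTo(HaveOccurred())     // likewise for gomega
}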

View File

@@ -688,7 +688,6 @@ test/e2e/storage/utils
test/e2e/storage/vsphere
test/e2e/ui
test/e2e/upgrades
test/e2e/upgrades/apps
test/e2e/upgrades/storage
test/e2e/windows
test/e2e_kubeadm
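With test/e2e/upgrades/apps removed from the exception list above (the file holding this list is not named in this extract), the package has to pass golint from now on. The remaining hunks apply the fix in two parts: the Ginkgo/Gomega imports lose their leading dot so every call site is qualified, and each exported method gains a doc comment. A minimal, hypothetical sketch of the resulting style, again with invented names rather than code from this commit:

// Hypothetical post-fix style mirroring the pattern applied in the hunks below.
package example

import (
    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

// ExampleUpgradeTest is an invented harness used only to illustrate the fixed style.
type ExampleUpgradeTest struct{}

// Name returns the tracking name of the test.
func (ExampleUpgradeTest) Name() string { return "[sig-apps] example-upgrade" }

// check uses qualified ginkgo/gomega calls instead of relying on dot imports.
func (ExampleUpgradeTest) check(err error) {
    ginkgo.By("Checking that no error occurred")
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
}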

View File

@@ -17,7 +17,7 @@ limitations under the License.
package upgrades
import (
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
@@ -35,6 +35,7 @@ type DaemonSetUpgradeTest struct {
daemonSet *apps.DaemonSet
}
// Name returns the tracking name of the test.
func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
// Setup creates a DaemonSet and verifies that it's running
@@ -74,29 +75,29 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
},
}
By("Creating a DaemonSet")
ginkgo.By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
By("Waiting for DaemonSet pods to become ready")
ginkgo.By("Waiting for DaemonSet pods to become ready")
err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
})
framework.ExpectNoError(err)
By("Validating the DaemonSet after creation")
ginkgo.By("Validating the DaemonSet after creation")
t.validateRunningDaemonSet(f)
}
// Test waits until the upgrade has completed and then verifies that the DaemonSet
// is still running
func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for upgradet to complete before re-validating DaemonSet")
ginkgo.By("Waiting for upgradet to complete before re-validating DaemonSet")
<-done
By("validating the DaemonSet is still running after upgrade")
ginkgo.By("validating the DaemonSet is still running after upgrade")
t.validateRunningDaemonSet(f)
}
@@ -106,7 +107,7 @@ func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
}
func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
By("confirming the DaemonSet pods are running on all expected nodes")
ginkgo.By("confirming the DaemonSet pods are running on all expected nodes")
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err)
if !res {
@@ -114,7 +115,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
}
// DaemonSet resource itself should be good
By("confirming the DaemonSet resource is in a good state")
ginkgo.By("confirming the DaemonSet resource is in a good state")
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err)
if !res {

View File

@@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -46,6 +46,7 @@ type DeploymentUpgradeTest struct {
newRSUID types.UID
}
// Name returns the tracking name of the test.
func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
// Setup creates a deployment and makes sure it has a new and an old replicaset running.
@@ -57,15 +58,15 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -76,20 +77,20 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
}
t.oldRSUID = rss[0].UID
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
@@ -97,7 +98,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID {
case rss[0].UID:
t.newRSUID = rss[1].UID
@@ -107,7 +108,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
}
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID
@@ -116,7 +117,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Test checks whether the replicasets for a deployment are the same after an upgrade.
func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
<-done
c := f.ClientSet
@@ -127,10 +128,10 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
Expect(deployment.UID).To(Equal(t.oldDeploymentUID))
ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -142,27 +143,27 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
switch t.oldRSUID {
case rss[0].UID:
Expect(rss[1].UID).To(Equal(t.newRSUID))
gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
case rss[1].UID:
Expect(rss[0].UID).To(Equal(t.newRSUID))
gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
default:
framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
}
By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
}

View File

@@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// JobUpgradeTest is a test harness for batch Jobs.
@@ -32,30 +32,31 @@ type JobUpgradeTest struct {
namespace string
}
// Name returns the tracking name of the test.
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
// Setup starts a Job with a parallelism of 2 and 2 completions running.
func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.namespace = f.Namespace.Name
By("Creating a job")
ginkgo.By("Creating a job")
t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// Test verifies that the Job's Pods are running after an upgrade
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(running).To(gomega.BeTrue())
}
// Teardown cleans up any remaining resources.

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -45,19 +45,21 @@ type ReplicaSetUpgradeTest struct {
UID types.UID
}
// Name returns the tracking name of the test.
func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
// Setup creates a ReplicaSet and makes sure its replicas are ready.
func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
ns := f.Namespace.Name
nginxImage := imageutils.GetE2EImage(imageutils.Nginx)
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID
@@ -70,28 +72,28 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
rsClient := c.AppsV1().ReplicaSets(ns)
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
<-done
// Verify the RS is the same (survives) after the upgrade
By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
}
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
}

View File

@@ -17,8 +17,8 @@ limitations under the License.
package upgrades
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -35,8 +35,10 @@ type StatefulSetUpgradeTest struct {
set *apps.StatefulSet
}
// Name returns the tracking name of the test.
func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" }
// Skip returns true when this test can be skipped.
func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
@@ -65,50 +67,50 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set)
By("Creating service " + headlessSvcName + " in namespace " + ns)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Creating statefulset " + ssName + " in namespace " + ns)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Saturating stateful set " + t.set.Name)
ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
t.verify()
t.restart()
t.verify()
}
// Waits for the upgrade to complete and verifies the StatefulSet basic functionality
// Test waits for the upgrade to complete and verifies the StatefulSet basic functionality
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
t.verify()
}
// Deletes all StatefulSets
// Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}
func (t *StatefulSetUpgradeTest) verify() {
By("Verifying statefulset mounted data directory is usable")
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
By("Verifying statefulset provides a stable hostname for each pod")
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set))
By("Verifying statefulset set proper service name")
ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all stateful pods")
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
}
func (t *StatefulSetUpgradeTest) restart() {
By("Restarting statefulset " + t.set.Name)
ginkgo.By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
}