Merge pull request #79864 from draveness/feature/use-framework-expect-equal-instead

feat: use framework.ExpectEqual instead of should
Kubernetes Prow Robot 2019-07-09 13:47:03 -07:00 committed by GitHub
commit 2d1ee3db83
8 changed files with 42 additions and 42 deletions
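
For context, the framework.ExpectEqual helper that these call sites switch to lets the e2e tests drop their direct gomega import. Below is a minimal sketch of such a wrapper, assuming it simply delegates to gomega's Equal matcher with a one-frame caller offset; the actual helper in the e2e framework package may differ in detail.

// Sketch (assumption, not necessarily the exact upstream source): a helper of
// this shape is enough to replace gomega.Expect(x).Should(gomega.Equal(y))
// with framework.ExpectEqual(x, y) throughout the files in this diff.
package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the current test if actual does not equal extra.
// The one-frame offset makes gomega report the caller's line rather than
// this wrapper's. explain is passed through as the optional failure message.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

With a helper of that shape, each replacement below is a mechanical rewrite of the assertion while keeping the compared values and failure messages unchanged.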

View File

@@ -297,7 +297,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 framework.ExpectNoError(err)
 _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
 framework.ExpectNoError(err)
-gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(allOldRSs), 1)
 }
 func testRecreateDeployment(f *framework.Framework) {
@@ -497,8 +497,8 @@ func testRolloverDeployment(f *framework.Framework) {
 }
 func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
-gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas))
-gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
+framework.ExpectEqual(*rs.Spec.Replicas, replicas)
+framework.ExpectEqual(rs.Status.Replicas, replicas)
 }
 func randomScale(d *appsv1.Deployment, i int) {
@@ -652,7 +652,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
 rsList := listDeploymentReplicaSets(c, ns, podLabels)
-gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(rsList.Items), 1)
 e2elog.Logf("Obtaining the ReplicaSet's UID")
 orphanedRSUID := rsList.Items[0].UID
@@ -683,10 +683,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
 rsList = listDeploymentReplicaSets(c, ns, podLabels)
-gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(rsList.Items), 1)
 e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
-gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID))
+framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID)
 }
 // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
@@ -769,7 +769,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 // Second rollout's replicaset should have 0 available replicas.
 e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
-gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0)))
+framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0))
 // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
 newReplicas := replicas + int32(maxSurge) - minAvailableReplicas

View File

@@ -74,17 +74,17 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 })
 ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
 _, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() {
 _, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
 _, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() {
@@ -101,7 +101,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
 framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
 _, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() {
@@ -120,7 +120,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 ginkgo.By("Node should not get the secret")
 _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 ginkgo.By("Create a pod that use the secret")
 pod := &v1.Pod{
@@ -175,12 +175,12 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 }
 ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
 _, err := c.CoreV1().Nodes().Create(node)
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 ginkgo.It("A node shouldn't be able to delete another node", func() {
 ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
 err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 })

View File

@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 coresPerNode = int((&cpu).MilliValue() / 1000)
 memCapacityMb = int((&mem).Value() / 1024 / 1024)
-gomega.Expect(nodeCount).Should(gomega.Equal(sum))
+framework.ExpectEqual(nodeCount, sum)
 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(3)
@@ -326,7 +326,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 ginkgo.By("Checking if the number of nodes is as expected")
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
-gomega.Expect(len(nodes.Items)).Should(gomega.Equal(totalNodes))
+framework.ExpectEqual(len(nodes.Items), totalNodes)
 })
 ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {

View File

@@ -123,7 +123,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 memAllocatableMb = int((&mem).Value() / 1024 / 1024)
-gomega.Expect(nodeCount).Should(gomega.Equal(sum))
+framework.ExpectEqual(nodeCount, sum)
 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(5)
@@ -186,7 +186,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 }
 }
-gomega.Expect(eventFound).Should(gomega.Equal(true))
+framework.ExpectEqual(eventFound, true)
 // Verify that cluster size is not changed
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size <= nodeCount }, time.Second))
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 ginkgo.By("Schedule a pod which requires GPU")
 framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
@@ -231,7 +231,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
 })
 ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -254,14 +254,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
 defer disableAutoscaler(gpuPoolName, 0, 2)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
 ginkgo.By("Scale GPU deployment")
 framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(2))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2)
 })
 ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -280,7 +280,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
@@ -290,7 +290,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 // Expect gpu pool to stay intact
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 })
 ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -313,14 +313,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
 ginkgo.By("Remove the only POD requiring GPU")
 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount }, scaleDownTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 })
 ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
@@ -360,10 +360,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 if status.target != target {
 klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 }
-gomega.Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(gomega.Equal(false))
-gomega.Expect(status.status).Should(gomega.Equal(caNoScaleUpStatus))
-gomega.Expect(status.ready).Should(gomega.Equal(status.target))
-gomega.Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(gomega.Equal(status.target + unmanagedNodes))
+framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false)
+framework.ExpectEqual(status.status, caNoScaleUpStatus)
+framework.ExpectEqual(status.ready, status.target)
+framework.ExpectEqual(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items), status.target+unmanagedNodes)
 })
 ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -382,7 +382,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
 nodes := getPoolNodes(f, extraPoolName)
-gomega.Expect(len(nodes)).Should(gomega.Equal(extraNodes))
+framework.ExpectEqual(len(nodes), extraNodes)
 extraMemMb := 0
 for _, node := range nodes {
 mem := node.Status.Allocatable[v1.ResourceMemory]
@@ -723,7 +723,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("No nodes should be removed")
 time.Sleep(scaleDownTimeout)
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodes.Items)).Should(gomega.Equal(increasedSize))
+framework.ExpectEqual(len(nodes.Items), increasedSize)
 })
 })
@@ -831,7 +831,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // GKE-specific check
 newSize := getPoolSize(f, extraPoolName)
-gomega.Expect(newSize).Should(gomega.Equal(0))
+framework.ExpectEqual(newSize, 0)
 }
 gceScaleToZero := func() {
@@ -862,7 +862,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // non-GKE only
 newSize, err := framework.GroupSize(minMig)
 framework.ExpectNoError(err)
-gomega.Expect(newSize).Should(gomega.Equal(0))
+framework.ExpectEqual(newSize, 0)
 }
 ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
@@ -918,11 +918,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 time.Sleep(scaleUpTimeout)
 currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
-gomega.Expect(len(currentNodes.Items)).Should(gomega.Equal(len(nodes.Items) - nodesToBreakCount))
+framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
 status, err := getClusterwideStatus(c)
 e2elog.Logf("Clusterwide status: %v", status)
 framework.ExpectNoError(err)
-gomega.Expect(status).Should(gomega.Equal("Unhealthy"))
+framework.ExpectEqual(status, "Unhealthy")
 }
 }
 testFunction()
@@ -1247,7 +1247,7 @@ func getPoolInitialSize(poolName string) int {
 klog.Infof("Node-pool initial size: %s", output)
 framework.ExpectNoError(err, string(output))
 fields := strings.Fields(string(output))
-gomega.Expect(len(fields)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(fields), 1)
 size, err := strconv.ParseInt(fields[0], 10, 64)
 framework.ExpectNoError(err)

View File

@@ -112,7 +112,9 @@ while true; do sleep 1; done
 gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))
 ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
-gomega.Expect(terminateContainer.IsReady()).Should(gomega.Equal(testCase.Ready))
+isReady, err := terminateContainer.IsReady()
+framework.ExpectEqual(isReady, testCase.Ready)
+framework.ExpectNoError(err)
 status, err := terminateContainer.GetStatus()
 framework.ExpectNoError(err)

View File

@@ -24,7 +24,6 @@ go_library(
 "//test/e2e/framework/log:go_default_library",
 "//test/e2e/lifecycle:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )

View File

@@ -18,7 +18,6 @@ package bootstrap
 import (
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -78,7 +77,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
 cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 framework.ExpectNoError(err)
 signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId]
-gomega.Expect(ok).Should(gomega.Equal(true))
+framework.ExpectEqual(ok, true)
 ginkgo.By("update the cluster-info ConfigMap")
 originalData := cfgMap.Data[bootstrapapi.KubeConfigKey]

View File

@@ -132,7 +132,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 ginkgo.By("deleting the pod gracefully")
 rsp, err := client.Do(req)
 framework.ExpectNoError(err, "failed to use http client to send delete")
-gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
+framework.ExpectEqual(rsp.StatusCode, http.StatusOK, "failed to delete gracefully by client request")
 var lastPod v1.Pod
 err = json.NewDecoder(rsp.Body).Decode(&lastPod)
 framework.ExpectNoError(err, "failed to decode graceful termination proxy response")