feat: use framework.ExpectNotEqual in e2e tests

draveness 2019-07-07 18:32:35 +08:00
parent 021ad88ac4
commit d3158b2c71
24 changed files with 52 additions and 58 deletions
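
This commit converts one-off gomega inequality assertions across the e2e suite to the shared framework helper, so call sites shrink to a single line and now-unused gomega imports can be dropped. The pattern, taken from the API chunking test below:

    // before: open-coded gomega assertion
    gomega.Expect(list.ResourceVersion).ToNot(gomega.Equal(firstRV))

    // after: the framework helper wraps the same gomega call
    framework.ExpectNotEqual(list.ResourceVersion, firstRV)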

View File

@@ -174,7 +174,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts.Continue = inconsistentToken
 list, err = client.List(opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
-gomega.Expect(list.ResourceVersion).ToNot(gomega.Equal(firstRV))
+framework.ExpectNotEqual(list.ResourceVersion, firstRV)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
 found := int(oneTenth)

View File

@@ -110,9 +110,9 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 Do().Into(pagedTable)
 framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
 framework.ExpectEqual(len(pagedTable.Rows), 2)
-gomega.Expect(pagedTable.ResourceVersion).ToNot(gomega.Equal(""))
-gomega.Expect(pagedTable.SelfLink).ToNot(gomega.Equal(""))
-gomega.Expect(pagedTable.Continue).ToNot(gomega.Equal(""))
+framework.ExpectNotEqual(pagedTable.ResourceVersion, "")
+framework.ExpectNotEqual(pagedTable.SelfLink, "")
+framework.ExpectNotEqual(pagedTable.Continue, "")
 framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0000")
 framework.ExpectEqual(pagedTable.Rows[1].Cells[0], "template-0001")
@@ -137,8 +137,8 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1))
 framework.ExpectEqual(len(table.Rows[0].Cells), len(table.ColumnDefinitions))
 framework.ExpectEqual(table.ColumnDefinitions[0].Name, "Name")
-gomega.Expect(table.ResourceVersion).ToNot(gomega.Equal(""))
-gomega.Expect(table.SelfLink).ToNot(gomega.Equal(""))
+framework.ExpectNotEqual(table.ResourceVersion, "")
+framework.ExpectNotEqual(table.SelfLink, "")
 out := printTable(table)
 gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))

View File

@@ -316,7 +316,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 waitForHistoryCreated(c, ns, label, 2)
 cur := curHistory(listDaemonHistories(c, ns, label), ds)
 framework.ExpectEqual(cur.Revision, int64(2))
-gomega.Expect(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
+framework.ExpectNotEqual(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey], firstHash)
 checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
 })
@@ -425,9 +425,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 if len(schedulableNodes.Items) < 2 {
 framework.ExpectEqual(len(existingPods), 0)
 } else {
-gomega.Expect(len(existingPods)).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(len(existingPods), 0)
 }
-gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(len(newPods), 0)
 e2elog.Logf("Roll back the DaemonSet before rollout is complete")
 rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {

View File

@@ -247,7 +247,7 @@ func testDeleteDeployment(f *framework.Framework) {
 framework.ExpectNoError(err)
 newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
 framework.ExpectNoError(err)
-gomega.Expect(newRS).NotTo(gomega.Equal(nilRs))
+framework.ExpectNotEqual(newRS, nilRs)
 stopDeployment(c, ns, deploymentName)
 }

View File

@@ -311,7 +311,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 oldImage := ss.Spec.Template.Spec.Containers[0].Image
 ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
-gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
+framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should update to a different image")
 ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
@@ -320,8 +320,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("Creating a new revision")
 ss = e2esset.WaitForStatus(c, ss)
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
-"Current revision should not equal update revision during rolling update")
+framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update")
 ginkgo.By("Not applying an update when the partition is greater than the number of replicas")
 for i := range pods.Items {
@@ -512,7 +511,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 oldImage := ss.Spec.Template.Spec.Containers[0].Image
 ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
-gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
+framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should update to a different image")
 ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
@@ -521,8 +520,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("Creating a new revision")
 ss = e2esset.WaitForStatus(c, ss)
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
-"Current revision should not equal update revision during rolling update")
+framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update")
 ginkgo.By("Recreating Pods at the new revision")
 e2esset.DeleteStatefulPodAtIndex(c, 0, ss)
@@ -1081,7 +1079,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 oldImage := ss.Spec.Template.Spec.Containers[0].Image
 ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
-gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
+framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should update to a different image")
 ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
@@ -1090,8 +1088,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 ginkgo.By("Creating a new revision")
 ss = e2esset.WaitForStatus(c, ss)
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
-"Current revision should not equal update revision during rolling update")
+framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update")
 ginkgo.By("Updating Pods in reverse ordinal order")
 pods = e2esset.GetPodList(c, ss)
@@ -1130,9 +1127,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 framework.ExpectNoError(err)
 ss = e2esset.WaitForStatus(c, ss)
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
-"Current revision should not equal update revision during roll back")
 framework.ExpectEqual(priorRevision, updateRevision, "Prior revision should equal update revision during roll back")
+framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back")
 ginkgo.By("Rolling back update in reverse ordinal order")
 pods = e2esset.GetPodList(c, ss)

View File

@@ -31,7 +31,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 const (
@@ -53,12 +52,12 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
-gomega.Expect(len(nodeList.Items)).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(len(nodeList.Items), 0)
 nodeName = nodeList.Items[0].Name
 asUser = nodeNamePrefix + nodeName
 saName := "default"
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
-gomega.Expect(len(sa.Secrets)).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(len(sa.Secrets), 0)
 framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName)
 defaultSaSecret = sa.Secrets[0].Name
 ginkgo.By("Creating a kubernetes client that impersonates a node")

View File

@@ -1352,6 +1352,11 @@ func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{})
 gomega.Expect(actual).To(gomega.Equal(extra), explain...)
 }
+// ExpectNotEqual expects the specified two values not to be equal; otherwise an exception is raised
+func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
+gomega.Expect(actual).NotTo(gomega.Equal(extra), explain...)
+}
 // ExpectError expects an error to happen; otherwise an exception is raised
 func ExpectError(err error, explain ...interface{}) {
 gomega.Expect(err).To(gomega.HaveOccurred(), explain...)
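
For orientation, converted call sites read like this; a minimal sketch using names drawn from call sites elsewhere in this commit (the trailing message here is illustrative):

    // fails the test if the node list is empty
    framework.ExpectNotEqual(len(nodeList.Items), 0)

    // optional trailing arguments become the failure explanation, as with gomega
    framework.ExpectNotEqual(statusCode, http.StatusOK, "expected the request to be rejected")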

View File

@@ -22,7 +22,6 @@ go_library(
 "//test/e2e/instrumentation/logging/stackdriver:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )

View File

@@ -24,7 +24,6 @@ import (
 "time"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/config"
@@ -80,7 +79,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 totalPods := len(nodes.Items)
-gomega.Expect(totalPods).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(totalPods, 0)
 kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

View File

@@ -52,7 +52,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 var statusCode int
 result.StatusCode(&statusCode)
-gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
+framework.ExpectNotEqual(statusCode, http.StatusOK)
 })
 ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
 result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 var statusCode int
 result.StatusCode(&statusCode)
-gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
+framework.ExpectNotEqual(statusCode, http.StatusOK)
 })
 // make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address

View File

@@ -144,7 +144,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 deletedNodeName = originalNodeName
 break
 }
-gomega.Expect(deletedNodeName).NotTo(gomega.Equal(""))
+framework.ExpectNotEqual(deletedNodeName, "")
 gomega.Eventually(func() error {
 if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil {
 return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)

View File

@@ -23,7 +23,6 @@ import (
 "time"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -59,7 +58,7 @@ func networkingIPerfTest(isIPv6 bool) {
 totalPods := len(nodes.Items)
 // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
-gomega.Expect(totalPods).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(totalPods, 0)
 appName := "iperf-e2e"
 _, err := f.CreateServiceForSimpleAppWithPods(
 8001,

View File

@@ -94,7 +94,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 // Fail the test if the namespace is not set. We expect that the
 // namespace is unique and we might delete user data if it's not.
 if len(f.Namespace.Name) == 0 {
-gomega.Expect(f.Namespace.Name).ToNot(gomega.Equal(""))
+framework.ExpectNotEqual(f.Namespace.Name, "")
 return
 }

View File

@@ -34,7 +34,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 // ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )
@@ -183,7 +182,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 framework.ExpectNoError(err)
 framework.ExpectEqual(len(podList.Items), 2)
 nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
-gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))
+framework.ExpectNotEqual(nodeNames[0], nodeNames[1])
 ginkgo.By("Applying a random label to both nodes.")
 k := "e2e.inter-pod-affinity.kubernetes.io/zone"

View File

@@ -23,7 +23,6 @@ import (
 "time"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 // ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
@@ -146,7 +145,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 ginkgo.By("Verify the pod was scheduled to the expected node.")
-gomega.Expect(labelPod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
+framework.ExpectNotEqual(labelPod.Spec.NodeName, nodeName)
 })
 ginkgo.It("Pod should avoid nodes that have avoidPod annotation", func() {
@@ -208,7 +207,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Verify the pods should not be scheduled to the node: %s", nodeName))
 for _, pod := range testPods.Items {
-gomega.Expect(pod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
+framework.ExpectNotEqual(pod.Spec.NodeName, nodeName)
 }
 })

View File

@@ -81,7 +81,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
 break
 }
 }
-gomega.Expect(extraZone).NotTo(gomega.Equal(""), fmt.Sprintf("No extra zones available in region %s", region))
+framework.ExpectNotEqual(extraZone, "", fmt.Sprintf("No extra zones available in region %s", region))
 ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
 project := framework.TestContext.CloudConfig.ProjectID

View File

@@ -1087,7 +1087,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 break
 }
 e2elog.Logf("Volume ID: %s", volumeID)
-gomega.Expect(volumeID).NotTo(gomega.Equal(""))
+framework.ExpectNotEqual(volumeID, "")
 return &cinderVolume{
 volumeName: volumeName,
 volumeID: volumeID,

View File

@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 ginkgo.By("Checking that the PV status is Terminating")
 pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While checking PV status")
-gomega.Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
+framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil)
 ginkgo.By("Deleting the PVC that is bound to the PV")
 err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))

View File

@@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 ginkgo.By("Checking that the PVC status is Terminating")
 pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While checking PVC status")
-gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
 ginkgo.By("Deleting the pod that uses the PVC")
 err = framework.DeletePodWithWait(f, client, pod)
@@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 ginkgo.By("Checking that the PVC status is Terminating")
 pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While checking PVC status")
-gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
 ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
 secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
@@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 ginkgo.By("Checking again that the PVC status is Terminating")
 pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While checking PVC status")
-gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
 ginkgo.By("Deleting the first pod that uses the PVC")
 err = framework.DeletePodWithWait(f, client, pod)

View File

@@ -457,7 +457,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[strin
 // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
 func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
 var err error
-gomega.Expect(len(claims)).ToNot(gomega.Equal(0))
+framework.ExpectNotEqual(len(claims), 0)
 namespace := claims[0].Namespace
 ginkgo.By("creating a storage class " + t.Class.Name)

View File

@@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
@@ -124,8 +124,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
-gomega.Expect(len(updatedStorageMetrics.latencyMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
-gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
+framework.ExpectNotEqual(len(updatedStorageMetrics.latencyMetrics), 0, "Error fetching c-m updated storage metrics")
+framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics")
 volumeOperations := []string{"volume_provision", "volume_detach", "volume_attach"}
@@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 pvc.Spec.StorageClassName = &invalidSc.Name
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
@@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 framework.ExpectNoError(err, "failed to get controller manager metrics")
 updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics)
-gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
+framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics")
 verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true)
 })
@@ -194,7 +194,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 var err error
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
 pod := framework.MakePod(ns, nil, claims, false, "")
@@ -251,7 +251,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 var err error
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
 pod := framework.MakePod(ns, nil, claims, false, "")
@@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 var err error
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
 pod := framework.MakePod(ns, nil, claims, false, "")
@@ -312,7 +312,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 var err error
 pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 framework.ExpectNoError(err)
-gomega.Expect(pvc).ToNot(gomega.Equal(nil))
+framework.ExpectNotEqual(pvc, nil)
 claims := []*v1.PersistentVolumeClaim{pvc}
 pod := framework.MakePod(ns, nil, claims, false, "")

View File

@@ -1231,7 +1231,7 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
 func getRandomClusterZone(c clientset.Interface) string {
 zones, err := framework.GetClusterZones(c)
 framework.ExpectNoError(err)
-gomega.Expect(len(zones)).ToNot(gomega.Equal(0))
+framework.ExpectNotEqual(len(zones), 0)
 zonesList := zones.UnsortedList()
 return zonesList[rand.Intn(zones.Len())]

View File

@@ -20,7 +20,6 @@ import (
 "fmt"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -65,7 +64,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
 framework.ExpectNoError(err)
 }
 if err == nil {
-gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
+framework.ExpectNotEqual(pod.Status.Phase, v1.PodRunning)
 }
 t.verifySafeSysctlWork(f)

View File

@@ -187,7 +187,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory {
 // Assuming that agent nodes have the same config
 // Make sure there are >0 agent nodes, then use the first one for info
-gomega.Expect(nodeList.Size()).NotTo(gomega.Equal(0))
+framework.ExpectNotEqual(nodeList.Size(), 0)
 ginkgo.By("Getting memory details from node status and kubelet config")