Merge pull request #119558 from Songjoy/cleanup-e2e-storage-framework-equal

e2e_storage: stop using deprecated framework.ExpectEqual
Kubernetes Prow Robot authored 2023-09-01 05:28:30 -07:00; committed by GitHub
commit b2499a1c28
32 changed files with 107 additions and 89 deletions
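
The cleanup below is mechanical and repeats the same few substitutions in every file: framework.ExpectEqual was a thin wrapper around gomega's Equal matcher and is deprecated, so each call site now invokes gomega directly and picks the most specific matcher available, which also yields a better failure message. A minimal sketch of the recurring patterns (the variable names are hypothetical placeholders, not code from this commit; in the e2e suite ginkgo's fail handler is already registered, so the package-level gomega.Expect can be called directly):

package sketch

import "github.com/onsi/gomega"

// examplePatterns mirrors the ExpectEqual replacements used throughout this
// commit, one matcher per recurring pattern.
func examplePatterns(items []string, annotations map[string]string, output, host string) {
	// framework.ExpectEqual(a, b)                         -> Equal
	gomega.Expect(items[0]).To(gomega.Equal("expected"))
	// framework.ExpectEqual(len(xs), n)                   -> HaveLen
	gomega.Expect(items).To(gomega.HaveLen(1))
	// framework.ExpectEqual(len(xs), 0)                   -> BeEmpty
	gomega.Expect(annotations["missing"]).To(gomega.BeEmpty())
	// framework.ExpectEqual(m[key], value)                -> HaveKeyWithValue
	gomega.Expect(annotations).To(gomega.HaveKeyWithValue("patched", "true"))
	// framework.ExpectEqual(true, strings.Contains(s, t)) -> ContainSubstring
	gomega.Expect(output).To(gomega.ContainSubstring(host))
	// framework.ExpectEqual(slice.ContainsString(xs, x, nil), true) -> ContainElement
	gomega.Expect(items).To(gomega.ContainElement("finalizer"))
}

Where no matcher fits cleanly (boolean helpers such as verifyGCEDiskAttached and verifyReadyNodeCount), the commit inlines an explicit if !ok { framework.Failf(...) } check instead, as the GCE and vSphere hunks below show.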

@ -117,17 +117,17 @@ var _ = utils.SIGDescribe("CSIInlineVolumes", func() {
ginkgo.By("getting")
retrievedPod, err := client.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(retrievedPod.UID, createdPod.UID)
gomega.Expect(retrievedPod.UID).To(gomega.Equal(createdPod.UID))
ginkgo.By("listing in namespace")
podList, err := client.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(len(podList.Items), 1, "list should have 1 items, got: %s", podList)
gomega.Expect(podList.Items).To(gomega.HaveLen(1), "list should have 1 items, got: %s", podList)
ginkgo.By("patching")
patchedPod, err := client.Patch(ctx, createdPod.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(patchedPod.Annotations["patched"], "true", "patched object should have the applied annotation")
gomega.Expect(patchedPod.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("deleting")
err = client.Delete(ctx, createdPod.Name, metav1.DeleteOptions{})

@ -23,7 +23,6 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -75,8 +74,8 @@ var _ = utils.SIGDescribe("CSI Mock fsgroup as mount option", func() {
framework.ExpectNoError(err, "failed to start pod")
if t.enableVolumeMountGroup {
framework.ExpectEqual(nodeStageFsGroup, fsGroupStr, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", fsGroupStr, nodeStageFsGroup)
framework.ExpectEqual(nodePublishFsGroup, fsGroupStr, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", fsGroupStr, nodePublishFsGroup)
gomega.Expect(nodeStageFsGroup).To(gomega.Equal(fsGroupStr), "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", fsGroupStr, nodeStageFsGroup)
gomega.Expect(nodePublishFsGroup).To(gomega.Equal(fsGroupStr), "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", fsGroupStr, nodePublishFsGroup)
} else {
gomega.Expect(nodeStageFsGroup).To(gomega.BeEmpty(), "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to be empty; got: %q", nodeStageFsGroup)
gomega.Expect(nodePublishFsGroup).To(gomega.BeEmpty(), "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to empty; got: %q", nodePublishFsGroup)

@ -160,8 +160,8 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
// Assert
ginkgo.By("Checking the initial pod mount options")
framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "NodeStage MountFlags for the initial pod")
framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "NodePublish MountFlags for the initial pod")
gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodeStage MountFlags for the initial pod")
gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodePublish MountFlags for the initial pod")
ginkgo.By("Checking the CSI driver calls for the initial pod")
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage call count for the initial pod")
@ -229,7 +229,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage calls after the first pod is deleted")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage calls for the second pod")
// The second pod got the right mount option
framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "NodeStage MountFlags for the second pod")
gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodeStage MountFlags for the second pod")
} else {
// Volume should not be fully unstaged between the first and the second pod
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage calls after the first pod is deleted")
@ -238,7 +238,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
// In both cases, Unpublish and Publish are called, with the right mount opts
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish calls after the first pod is deleted")
gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish calls for the second pod")
framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "NodePublish MountFlags for the second pod")
gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodePublish MountFlags for the second pod")
})
}
})

@ -151,7 +151,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
if test.lateBinding {
bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
}
framework.ExpectEqual(*sc.VolumeBindingMode, bindingMode, "volume binding mode")
gomega.Expect(*sc.VolumeBindingMode).To(gomega.Equal(bindingMode), "volume binding mode")
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
@ -354,7 +354,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
time.Sleep(syncDelay)
sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above
framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
gomega.Expect(sc.Name).To(gomega.Equal(scName), "pre-selected storage class name not used")
condition := anyOf(
podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace),

@ -165,7 +165,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
framework.ExpectNoError(err, "while waiting for PVC resize to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
}
// if node expansion is not required PVC should be resized as well
@ -179,7 +179,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
inProgressConditions := pvc.Status.Conditions
if len(inProgressConditions) > 0 {
framework.ExpectEqual(inProgressConditions[0].Type, v1.PersistentVolumeClaimFileSystemResizePending, "pvc must have fs resizing condition")
gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
}
ginkgo.By("Deleting the previously created pod")
@ -329,7 +329,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
framework.ExpectNoError(err, "while waiting for all CSI calls")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
}
})
@ -390,7 +390,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
framework.ExpectNoError(err, "while waiting for PVC to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
}
@ -528,7 +528,7 @@ func validateExpansionSuccess(ctx context.Context, pvc *v1.PersistentVolumeClaim
framework.ExpectNoError(err, "while waiting for PVC to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
allocatedResource := pvc.Status.AllocatedResources.Storage()
gomega.Expect(allocatedResource).NotTo(gomega.BeNil())
expectedAllocatedResource := resource.MustParse(expectedAllocatedSize)
@ -537,7 +537,7 @@ func validateExpansionSuccess(ctx context.Context, pvc *v1.PersistentVolumeClaim
}
resizeStatus := pvc.Status.AllocatedResourceStatuses[v1.ResourceStorage]
framework.ExpectEqual(resizeStatus, "", "resize status should be empty")
gomega.Expect(resizeStatus).To(gomega.BeZero(), "resize status should be empty")
}
func waitForResizeStatus(pvc *v1.PersistentVolumeClaim, c clientset.Interface, expectedState v1.ClaimResourceStatus) error {

@ -31,6 +31,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = utils.SIGDescribe("CSIStorageCapacity", func() {
@ -152,29 +153,29 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() {
ginkgo.By("getting")
gottenCSC, err := cscClient.Get(ctx, csc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(gottenCSC.UID, createdCSC.UID)
gomega.Expect(gottenCSC.UID).To(gomega.Equal(createdCSC.UID))
ginkgo.By("listing in namespace")
cscs, err := cscClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(cscs.Items), 3, "filtered list should have 3 items, got: %s", cscs)
gomega.Expect(cscs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items, got: %s", cscs)
ginkgo.By("listing across namespaces")
cscs, err = cscClientNoNamespace.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(cscs.Items), 3, "filtered list should have 3 items, got: %s", cscs)
gomega.Expect(cscs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items, got: %s", cscs)
ginkgo.By("patching")
patchedCSC, err := cscClient.Patch(ctx, createdCSC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(patchedCSC.Annotations["patched"], "true", "patched object should have the applied annotation")
gomega.Expect(patchedCSC.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("updating")
csrToUpdate := patchedCSC.DeepCopy()
csrToUpdate.Annotations["updated"] = "true"
updatedCSC, err := cscClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(updatedCSC.Annotations["updated"], "true", "updated object should have the applied annotation")
gomega.Expect(updatedCSC.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
expectWatchResult := func(kind string, w watch.Interface) {
framework.Logf("waiting for watch events with expected annotations %s", kind)

@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
// DriverDefinition needs to be filled in via a .yaml or .json
@ -287,7 +288,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
var ok bool
items, err := utils.LoadFromManifests(d.StorageClass.FromFile)
framework.ExpectNoError(err, "load storage class from %s", d.StorageClass.FromFile)
framework.ExpectEqual(len(items), 1, "exactly one item from %s", d.StorageClass.FromFile)
gomega.Expect(items).To(gomega.HaveLen(1), "exactly one item from %s", d.StorageClass.FromFile)
err = utils.PatchItems(f, f.Namespace, items...)
framework.ExpectNoError(err, "patch items")

@ -138,7 +138,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)
gomega.Expect(pvs).To(gomega.HaveLen(1))
ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, admissionapi.LevelRestricted, "")
@ -180,6 +180,6 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
})

@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)
gomega.Expect(pvs).To(gomega.HaveLen(1))
var pod *v1.Pod
ginkgo.By("Creating pod")
@ -169,7 +169,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
})

@ -20,6 +20,7 @@ import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -110,7 +111,7 @@ func createPodPVCFromSC(ctx context.Context, f *framework.Framework, c clientset
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)
gomega.Expect(pvs).To(gomega.HaveLen(1))
ginkgo.By("Creating a pod with dynamically provisioned volume")
podConfig := e2epod.Config{

@ -34,6 +34,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() {
@ -478,7 +479,7 @@ func verifyPodHostPathTypeFailure(ctx context.Context, f *framework.Framework, n
// Check the pod is still not running
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
}

@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := testVol.pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
})

@ -119,7 +119,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
ginkgo.By("Checking for bound PVC")
pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)
gomega.Expect(pvs).To(gomega.HaveLen(1))
ginkgo.By("Wait for a pod from deployment to be running")
podList, err := e2edeployment.GetPodsForDeployment(ctx, c, deployment)
@ -164,7 +164,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
})

@ -26,6 +26,7 @@ import (
"google.golang.org/api/googleapi"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -403,17 +404,17 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
framework.ExpectEqual(true, strings.Contains(string(output), string(host0Name)))
gomega.Expect(string(output)).Should(gomega.ContainSubstring(string(host0Name)))
ginkgo.By("deleting host0")
err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err))
ginkgo.By("expecting host0 node to be re-created")
numNodes := countReadyNodes(ctx, cs, host0Name)
framework.ExpectEqual(numNodes, origNodeCnt, fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
gomega.Expect(numNodes).To(gomega.Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
framework.ExpectEqual(true, strings.Contains(string(output), string(host0Name)))
gomega.Expect(string(output)).Should(gomega.ContainSubstring(string(host0Name)))
} else if disruptOp == deleteNodeObj {
ginkgo.By("deleting host0's node api object")

@ -20,6 +20,7 @@ import (
"context"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -130,7 +131,9 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
if !verifyGCEDiskAttached(diskName, node) {
framework.Failf("Disk %s is not attached to node %s", diskName, node)
}
ginkgo.By("Deleting the Pod")
framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name)
@ -145,7 +148,9 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
ginkgo.By("Deleting the Persistent Volume")
framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
if !verifyGCEDiskAttached(diskName, node) {
framework.Failf("Disk %s is not attached to node %s", diskName, node)
}
ginkgo.By("Deleting the client pod")
framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name)

@ -819,7 +819,7 @@ func verifyLocalPod(ctx context.Context, config *localTestConfig, volume *localT
podNodeName, err := podNodeName(ctx, config, pod)
framework.ExpectNoError(err)
framework.Logf("pod %q created on Node %q", pod.Name, podNodeName)
framework.ExpectEqual(podNodeName, expectedNodeName)
gomega.Expect(podNodeName).To(gomega.Equal(expectedNodeName))
}
func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) e2epv.PersistentVolumeClaimConfig {
@ -1168,10 +1168,10 @@ func validateStatefulSet(ctx context.Context, config *localTestConfig, ss *appsv
if anti {
// Verify that each pod is on a different node
framework.ExpectEqual(nodes.Len(), len(pods.Items))
gomega.Expect(pods.Items).To(gomega.HaveLen(nodes.Len()))
} else {
// Verify that all pods are on same node.
framework.ExpectEqual(nodes.Len(), 1)
gomega.Expect(nodes.Len()).To(gomega.Equal(1))
}
// Validate all PVCs are bound

@ -21,13 +21,13 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While getting PV status")
framework.ExpectEqual(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil), true, "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
gomega.Expect(pv.ObjectMeta.Finalizers).Should(gomega.ContainElement(volumeutil.PVProtectionFinalizer), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
})
ginkgo.AfterEach(func(ctx context.Context) {

@ -20,6 +20,7 @@ import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"fmt"
"time"
@ -28,7 +29,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -105,7 +105,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.By("Checking that PVC Protection finalizer is set")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While getting PVC status")
framework.ExpectEqual(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil), true, "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
gomega.Expect(pvc.ObjectMeta.Finalizers).Should(gomega.ContainElement(volumeutil.PVCProtectionFinalizer), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
})
ginkgo.AfterEach(func(ctx context.Context) {

@ -22,6 +22,8 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -109,7 +111,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
framework.ExpectNoError(err)
updatedPVC, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(*updatedPVC.Spec.StorageClassName, storageClass.Name, "Expected PVC %v to have StorageClass %v, but it has StorageClass %v instead", updatedPVC.Name, prefixSC, updatedPVC.Spec.StorageClassName)
gomega.Expect(*updatedPVC.Spec.StorageClassName).To(gomega.Equal(storageClass.Name), "Expected PVC %v to have StorageClass %v, but it has StorageClass %v instead", updatedPVC.Name, prefixSC, updatedPVC.Spec.StorageClassName)
framework.Logf("Success - PersistentVolumeClaim %s got updated retroactively with StorageClass %v", updatedPVC.Name, storageClass.Name)
})
})

@ -282,7 +282,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
}
ginkgo.By("verifying the same PVC is used by the new pod")
framework.ExpectEqual(getPVC(ctx, c, ns, regionalPDLabels).Name, pvc.Name, "The same PVC should be used after failover.")
gomega.Expect(getPVC(ctx, c, ns, regionalPDLabels).Name).To(gomega.Equal(pvc.Name), "The same PVC should be used after failover.")
ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
logs, err := e2epod.GetPodLogs(ctx, c, ns, pod.Name, "")
@ -290,7 +290,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
"Error getting logs from pod %s in namespace %s", pod.Name, ns)
lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
expectedLineCount := 2
framework.ExpectEqual(lineCount, expectedLineCount, "Line count of the written file should be %d.", expectedLineCount)
gomega.Expect(lineCount).To(gomega.Equal(expectedLineCount), "Line count of the written file should be %d.", expectedLineCount)
}
@ -449,7 +449,7 @@ func getPVC(ctx context.Context, c clientset.Interface, ns string, pvcLabels map
options := metav1.ListOptions{LabelSelector: selector.String()}
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, options)
framework.ExpectNoError(err)
framework.ExpectEqual(len(pvcList.Items), 1, "There should be exactly 1 PVC matched.")
gomega.Expect(pvcList.Items).To(gomega.HaveLen(1), "There should be exactly 1 PVC matched.")
return &pvcList.Items[0]
}
@ -459,7 +459,7 @@ func getPod(ctx context.Context, c clientset.Interface, ns string, podLabels map
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(ctx, options)
framework.ExpectNoError(err)
framework.ExpectEqual(len(podList.Items), 1, "There should be exactly 1 pod matched.")
gomega.Expect(podList.Items).To(gomega.HaveLen(1), "There should be exactly 1 pod matched.")
return &podList.Items[0]
}

@ -274,7 +274,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
return nil
}
l.testCase.TestEphemeral(ctx)

@ -764,7 +764,7 @@ func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.Persi
}()
// ensure that the claim refers to the provisioned StorageClass
framework.ExpectEqual(*claim.Spec.StorageClassName, class.Name)
gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name))
// if late binding is configured, create and delete a pod to provision the volume
if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
@ -856,17 +856,17 @@ func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientse
}
}
framework.ExpectEqual(pv.Spec.ClaimRef.Name, claim.ObjectMeta.Name)
framework.ExpectEqual(pv.Spec.ClaimRef.Namespace, claim.ObjectMeta.Namespace)
gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name))
gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace))
if class == nil {
framework.ExpectEqual(pv.Spec.PersistentVolumeReclaimPolicy, v1.PersistentVolumeReclaimDelete)
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete))
} else {
framework.ExpectEqual(pv.Spec.PersistentVolumeReclaimPolicy, *class.ReclaimPolicy)
framework.ExpectEqual(pv.Spec.MountOptions, class.MountOptions)
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy))
gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions))
}
if claim.Spec.VolumeMode != nil {
gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil())
framework.ExpectEqual(*pv.Spec.VolumeMode, *claim.Spec.VolumeMode)
gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode))
}
return pv
}
@ -938,7 +938,7 @@ func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface,
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")
gomega.Expect(node.Name).To(gomega.BeZero(), "this test only works when not locked onto a single node")
var pod *v1.Pod
defer func() {
@ -1043,7 +1043,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
framework.ExpectNoError(err)
pvs = append(pvs, pv)
}
framework.ExpectEqual(len(pvs), len(createdClaims))
gomega.Expect(pvs).To(gomega.HaveLen(len(createdClaims)))
return pvs, node
}
@ -1195,7 +1195,7 @@ func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
}
}

@ -440,9 +440,9 @@ func checkSnapshot(ctx context.Context, dc dynamic.Interface, sr *storageframewo
ginkgo.By("checking the SnapshotContent")
// PreprovisionedCreatedSnapshot does not need to set volume snapshot class name
if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot {
framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
gomega.Expect(snapshotContentSpec["volumeSnapshotClassName"]).To(gomega.Equal(vsc.GetName()))
}
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())
gomega.Expect(volumeSnapshotRef).To(gomega.HaveKeyWithValue("name", vs.GetName()))
gomega.Expect(volumeSnapshotRef).To(gomega.HaveKeyWithValue("namespace", vs.GetNamespace()))
return vscontent
}

@ -236,7 +236,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := l.resource.Pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) {
@ -285,7 +285,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := l.resource.Pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
})
}

@ -251,7 +251,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
// Check the pod is still not running
p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
})
}
@ -289,7 +289,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
// Check the pvc is still pending
pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(ctx, l.Pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)")
framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending")
gomega.Expect(pvc.Status.Phase).To(gomega.Equal(v1.ClaimPending), "PVC phase isn't pending")
})
}
default:
@ -348,7 +348,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
// Check the pod is still not running
p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
})
ginkgo.It("should not mount / map unused volumes in a pod [LinuxOnly]", func(ctx context.Context) {

@ -70,8 +70,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
framework.ExpectEqual(expectedFSGroup, fsGroupResult,
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
gomega.Expect(expectedFSGroup).To(gomega.Equal(fsGroupResult), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// getKubeletMainPid returns the main PID of the kubelet process
@ -141,14 +140,14 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
ginkgo.By("Writing to the volume.")
@ -201,7 +200,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
@ -262,13 +261,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c client
result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
@ -699,7 +698,7 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
framework.ExpectEqual(ll[3], expectedGid)
gomega.Expect(ll[3]).To(gomega.Equal(expectedGid))
}
// ChangeFilePathGidInPod changes the GID of the target filepath.

@ -546,7 +546,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// should be 4, and the elements should be bound pv count, unbound pv count, bound
// pvc count, unbound pvc count in turn.
validator := func(ctx context.Context, metricValues []map[string]int64) {
framework.ExpectEqual(len(metricValues), 4, "Wrong metric size: %d", len(metricValues))
gomega.Expect(metricValues).To(gomega.HaveLen(4), "Wrong metric size: %d", len(metricValues))
controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx)
framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err)
@ -561,7 +561,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// test suit are equal to expectValues.
actualValues := calculateRelativeValues(originMetricValues[i],
testutil.GetMetricValuesForLabel(testutil.Metrics(controllerMetrics), metric.name, metric.dimension))
framework.ExpectEqual(actualValues, expectValues, "Wrong pv controller metric %s(%s): wanted %v, got %v",
gomega.Expect(actualValues).To(gomega.Equal(expectValues), "Wrong pv controller metric %s(%s): wanted %v, got %v",
metric.name, metric.dimension, expectValues, actualValues)
}
}
@ -810,7 +810,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string
}
}
}
framework.ExpectEqual(errCount, 0, "Found invalid samples")
gomega.Expect(errCount).To(gomega.Equal(0), "Found invalid samples")
return found
}

@ -580,7 +580,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
})
// Modifying the default storage class can be disruptive to other tests that depend on it
@ -619,7 +619,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
})
})
@ -692,7 +692,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
func verifyDefaultStorageClass(ctx context.Context, c clientset.Interface, scName string, expectedDefault bool) {
sc, err := c.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(storageutil.IsDefaultAnnotation(sc.ObjectMeta), expectedDefault)
gomega.Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(gomega.Equal(expectedDefault))
}
func updateDefaultStorageClass(ctx context.Context, c clientset.Interface, scName string, defaultStr string) {

@ -88,7 +88,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
framework.ExpectNoError(e2estatefulset.CheckMount(ctx, client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2estatefulset.GetPodList(ctx, client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
gomega.Expect(ssPodsBeforeScaleDown.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match with number of replicas")
// Get the list of Volumes attached to Pods before scale down
volumesBeforeScaleDown := make(map[string]string)
@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
ssPodsAfterScaleUp := e2estatefulset.GetPodList(ctx, client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
gomega.Expect(ssPodsAfterScaleUp.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match with number of replicas")
// After scale up, verify all vsphere volumes are attached to node VMs.
ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")

@ -147,7 +147,9 @@ func invokeTest(ctx context.Context, f *framework.Framework, client clientset.In
framework.ExpectNoError(err)
ginkgo.By("Verify Disk Format")
framework.ExpectEqual(verifyDiskFormat(ctx, client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat), true, "DiskFormat Verification Failed")
if !verifyDiskFormat(ctx, client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat) {
framework.Failf("DiskFormat Verification Failed. Node: %s, VolumePath: %s, Expected Format: %s", nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)
}
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)

@ -21,6 +21,8 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -95,6 +97,6 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
ginkgo.By("Verifying if provisioned PV has the correct size")
expectedCapacity := resource.MustParse(expectedDiskSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
framework.ExpectEqual(pvCapacity.Value(), expectedCapacity.Value())
gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()))
})
})

@ -81,7 +81,9 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
// Ready nodes should be 1 less
ginkgo.By("Verifying the ready node counts")
framework.ExpectEqual(verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount-1), true, "Unable to verify expected ready node count")
if !verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount-1) {
framework.Failf("Unable to verify expected ready node count. Total Nodes: %d, Expected Ready Nodes: %d", totalNodesCount, totalNodesCount-1)
}
nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client)
framework.ExpectNoError(err)
@ -98,7 +100,9 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
// Ready nodes should be equal to earlier count
ginkgo.By("Verifying the ready node counts")
framework.ExpectEqual(verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount), true, "Unable to verify expected ready node count")
if !verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount) {
framework.Failf("Unable to verify expected ready node count. Total Nodes: %d, Expected Ready Nodes: %d", totalNodesCount, totalNodesCount)
}
nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client)
framework.ExpectNoError(err)