test/e2e/storage: use framework.ExpectEqual() to replace gomega.Expect(...).To(gomega.BeTrue(), ...)
parent eef4c00ae9
commit f8e0c6bb41
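Every hunk below applies the same mechanical rewrite: a boolean assertion routed through gomega matchers becomes a direct framework.ExpectEqual call, and once a file's last gomega call is gone, its import (and BUILD dependency) is dropped as well. A minimal sketch of the two styles, assuming the k8s.io/kubernetes/test/e2e/framework helpers of this era and a hypothetical testVolume type standing in for the real driver volume types:

package example

import (
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// testVolume is a hypothetical stand-in for the testsuites.TestVolume
// implementations touched by this commit.
type testVolume struct{ serverIP string }

func castVolume(volume interface{}) *testVolume {
	v, ok := volume.(*testVolume)

	// Old style: wrap the boolean in a matcher chain.
	gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume")

	// New style: assert equality directly through the e2e framework,
	// which fails the spec with a uniform message on mismatch.
	framework.ExpectEqual(ok, true, "Failed to cast test volume")

	return v
}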
@@ -465,7 +465,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 sc, pvc, pod := createPod(false)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
+framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
 
 err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
@@ -557,7 +557,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 sc, pvc, pod := createPod(false)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
+framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
 
 err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
@@ -35,7 +35,6 @@ go_library(
 "//test/e2e/storage/vsphere:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )
 
@@ -43,7 +43,6 @@ import (
 "time"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 storagev1 "k8s.io/api/storage/v1"
@@ -119,7 +118,7 @@ func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 nv, ok := volume.(*nfsVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
 return &v1.VolumeSource{
 NFS: &v1.NFSVolumeSource{
 Server: nv.serverIP,
@@ -131,7 +130,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 
 func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 nv, ok := volume.(*nfsVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
 return &v1.PersistentVolumeSource{
 NFS: &v1.NFSVolumeSource{
 Server: nv.serverIP,
@@ -260,7 +259,7 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
 
 func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 gv, ok := volume.(*glusterVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
 
 name := gv.prefix + "-server"
 return &v1.VolumeSource{
@@ -275,7 +274,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t
 
 func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 gv, ok := volume.(*glusterVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
 
 name := gv.prefix + "-server"
 return &v1.PersistentVolumeSource{
@@ -384,7 +383,7 @@ func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 iv, ok := volume.(*iSCSIVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
 
 volSource := v1.VolumeSource{
 ISCSI: &v1.ISCSIVolumeSource{
@@ -402,7 +401,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
 
 func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 iv, ok := volume.(*iSCSIVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
 
 pvSource := v1.PersistentVolumeSource{
 ISCSI: &v1.ISCSIPersistentVolumeSource{
@@ -501,7 +500,7 @@ func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 rv, ok := volume.(*rbdVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
 
 volSource := v1.VolumeSource{
 RBD: &v1.RBDVolumeSource{
@@ -523,7 +522,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 
 func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 rv, ok := volume.(*rbdVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
 
 f := rv.f
 ns := f.Namespace
@@ -624,7 +623,7 @@ func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 cv, ok := volume.(*cephVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
 
 return &v1.VolumeSource{
 CephFS: &v1.CephFSVolumeSource{
@@ -640,7 +639,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
 
 func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 cv, ok := volume.(*cephVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
 
 ns := cv.f.Namespace
 
@@ -794,7 +793,7 @@ func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPat
 
 func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 hv, ok := volume.(*hostPathSymlinkVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")
 
 // hostPathSymlink doesn't support readOnly volume
 if readOnly {
@@ -1013,7 +1012,7 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 cv, ok := volume.(*cinderVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
 
 volSource := v1.VolumeSource{
 Cinder: &v1.CinderVolumeSource{
@@ -1029,7 +1028,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test
 
 func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 cv, ok := volume.(*cinderVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
 
 pvSource := v1.PersistentVolumeSource{
 Cinder: &v1.CinderPersistentVolumeSource{
@@ -1191,7 +1190,7 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 gv, ok := volume.(*gcePdVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
 volSource := v1.VolumeSource{
 GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 PDName: gv.volumeName,
@@ -1206,7 +1205,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
 
 func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 gv, ok := volume.(*gcePdVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
 pvSource := v1.PersistentVolumeSource{
 GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 PDName: gv.volumeName,
@@ -1316,7 +1315,7 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 vsv, ok := volume.(*vSphereVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
 
 // vSphere driver doesn't seem to support readOnly volume
 // TODO: check if it is correct
@@ -1336,7 +1335,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes
 
 func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 vsv, ok := volume.(*vSphereVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
 
 // vSphere driver doesn't seem to support readOnly volume
 // TODO: check if it is correct
@@ -1445,8 +1444,7 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
 
 func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 av, ok := volume.(*azureDiskVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume")
-
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
 diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
 kind := v1.AzureManagedDisk
@@ -1466,7 +1464,7 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume t
 
 func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 av, ok := volume.(*azureDiskVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
 
 diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
@@ -1579,7 +1577,7 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 
 func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 av, ok := volume.(*awsVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to AWS test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
 volSource := v1.VolumeSource{
 AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 VolumeID: av.volumeName,
@@ -1594,7 +1592,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 
 func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 av, ok := volume.(*awsVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to AWS test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
 pvSource := v1.PersistentVolumeSource{
 AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 VolumeID: av.volumeName,
@@ -1832,7 +1830,7 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
 
 func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 lv, ok := volume.(*localVolume)
-gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to local test volume")
+framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume")
 return &v1.PersistentVolumeSource{
 Local: &v1.LocalVolumeSource{
 Path: lv.ltr.Path,
@@ -20,7 +20,6 @@ import (
 "time"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -86,7 +85,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 ginkgo.By("Checking that PV Protection finalizer is set")
 pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While getting PV status")
-gomega.Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(gomega.BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
+framework.ExpectEqual(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil), true, "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
 })
 
 ginkgo.AfterEach(func() {
@@ -18,12 +18,11 @@ package storage
 
 import (
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 
 "fmt"
 "time"
 
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -95,7 +94,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 ginkgo.By("Checking that PVC Protection finalizer is set")
 pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While getting PVC status")
-gomega.Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(gomega.BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
+framework.ExpectEqual(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil), true, "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
 })
 
 ginkgo.AfterEach(func() {
@@ -217,8 +217,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
 if err != nil {
 pod := getPod(c, ns, regionalPDLabels)
-gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(),
-"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
 framework.ExpectNoError(err)
 }
 
@@ -269,8 +268,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
 if err != nil {
 pod := getPod(c, ns, regionalPDLabels)
-gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(),
-"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
 framework.ExpectNoError(err)
 }
 
@@ -279,14 +279,14 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
 e2essh.LogResult(result)
 framework.ExpectNoError(err, "Encountered SSH error.")
-gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
 
 if checkSubpath {
 ginkgo.By("Expecting the volume subpath mount to be found.")
 result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
 e2essh.LogResult(result)
 framework.ExpectNoError(err, "Encountered SSH error.")
-gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
 }
 
 // This command is to make sure kubelet is started after test finishes no matter it fails or not.
@@ -153,7 +153,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 framework.ExpectNoError(err)
 
 ginkgo.By("Verify Disk Format")
-gomega.Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(gomega.BeTrue(), "DiskFormat Verification Failed")
+framework.ExpectEqual(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat), true, "DiskFormat Verification Failed")
 
 var volumePaths []string
 volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
@@ -195,7 +195,7 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
 }
 }
 
-gomega.Expect(diskFound).To(gomega.BeTrue(), "Failed to find disk")
+framework.ExpectEqual(diskFound, true, "Failed to find disk")
 isDiskFormatCorrect := false
 if diskFormat == "eagerzeroedthick" {
 if eagerlyScrub == true && thinProvisioned == false {
@@ -144,7 +144,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
 isFound = true
 }
 }
-gomega.Expect(isFound).To(gomega.BeTrue(), "Unable to verify MountVolume.MountDevice failure")
+framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
 }
 
 func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 ginkgo.By("Get total Ready nodes")
 nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 framework.ExpectNoError(err)
-gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
+framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
 
 totalNodesCount := len(nodeList.Items)
 nodeVM := nodeList.Items[0]
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 
 // Ready nodes should be 1 less
 ginkgo.By("Verifying the ready node counts")
-gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
+framework.ExpectEqual(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1), true, "Unable to verify expected ready node count")
 
 nodeList, err = e2enode.GetReadySchedulableNodes(client)
 framework.ExpectNoError(err)
@@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 
 // Ready nodes should be equal to earlier count
 ginkgo.By("Verifying the ready node counts")
-gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
+framework.ExpectEqual(verifyReadyNodeCount(f.ClientSet, totalNodesCount), true, "Unable to verify expected ready node count")
 
 nodeList, err = e2enode.GetReadySchedulableNodes(client)
 framework.ExpectNoError(err)
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 framework.ExpectNoError(err)
-gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
+framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
 })
 
 /*
@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
 isAttached, err := diskIsAttached(volumePath, node1)
 framework.ExpectNoError(err)
-gomega.Expect(isAttached).To(gomega.BeTrue(), "Disk is not attached to the node")
+framework.ExpectEqual(isAttached, true, "Disk is not attached to the node")
 
 ginkgo.By(fmt.Sprintf("Power off the node: %v", node1))
 
@@ -352,5 +352,6 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
 dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
 errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
 nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
-gomega.Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(gomega.BeTrue(), errorMsg)
+isVMPresentFlag, _ := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
+framework.ExpectNotEqual(isVMPresentFlag, true, errorMsg)
 }