Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)
Merge pull request #111311 from SD-13/improve_output_msg_1
Changed code to improve output for files under test/e2e/storage
Commit 2c4841796a
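The commit applies one pattern across every file touched below: boolean assertions written as framework.ExpectEqual(value, true, msg) are replaced by an explicit check that calls framework.Failf (or framework.Fail), so a failure reports the offending volume path, node name, or type instead of a bare expected-true mismatch. A minimal before-and-after sketch of the pattern, using identifiers taken from one of the vSphere hunks further down:

// Before: a failure only reports that the expected value was true.
framework.ExpectEqual(isAttached, true, "disk:"+volumePath+" is not attached with the node")

// After: the failure message carries the volume path and node name.
if !isAttached {
	framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
}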
@@ -684,7 +684,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 sc, pvc, pod := createPod(pvcReference)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
+if !*sc.AllowVolumeExpansion {
+	framework.Fail("failed creating sc with allowed expansion")
+}
 
 err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
@@ -777,7 +779,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 sc, pvc, pod := createPod(pvcReference)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
+if !*sc.AllowVolumeExpansion {
+	framework.Fail("failed creating sc with allowed expansion")
+}
 
 err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
@@ -124,7 +124,9 @@ func (n *nfsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	nv, ok := e2evolume.(*nfsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
+	}
 	return &v1.VolumeSource{
 		NFS: &v1.NFSVolumeSource{
 			Server: nv.serverHost,
@@ -136,7 +138,9 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	nv, ok := e2evolume.(*nfsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
+	}
 	return &v1.PersistentVolumeSource{
 		NFS: &v1.NFSVolumeSource{
 			Server: nv.serverHost,
@@ -264,7 +268,9 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	gv, ok := e2evolume.(*glusterVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume)
+	}
 
 	name := gv.prefix + "-server"
 	return &v1.VolumeSource{
@@ -279,7 +285,9 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gv, ok := e2evolume.(*glusterVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume)
+	}
 
 	name := gv.prefix + "-server"
 	return &v1.PersistentVolumeSource{
@@ -399,7 +407,9 @@ func (i *iSCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	iv, ok := e2evolume.(*iSCSIVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
+	}
 
 	volSource := v1.VolumeSource{
 		ISCSI: &v1.ISCSIVolumeSource{
@@ -417,7 +427,9 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume st
 
 func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	iv, ok := e2evolume.(*iSCSIVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
+	}
 
 	pvSource := v1.PersistentVolumeSource{
 		ISCSI: &v1.ISCSIPersistentVolumeSource{
@@ -576,7 +588,9 @@ func (r *rbdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	rv, ok := e2evolume.(*rbdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
+	}
 
 	volSource := v1.VolumeSource{
 		RBD: &v1.RBDVolumeSource{
@@ -598,7 +612,9 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	rv, ok := e2evolume.(*rbdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
+	}
 
 	f := rv.f
 	ns := f.Namespace
@@ -699,7 +715,9 @@ func (c *cephFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	cv, ok := e2evolume.(*cephVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
+	}
 
 	return &v1.VolumeSource{
 		CephFS: &v1.CephFSVolumeSource{
@@ -715,7 +733,9 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume s
 
 func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	cv, ok := e2evolume.(*cephVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
+	}
 
 	ns := cv.f.Namespace
 
@@ -873,7 +893,9 @@ func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern storageframework.Tes
 
 func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	hv, ok := e2evolume.(*hostPathSymlinkVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Hostpath Symlink test volume", e2evolume)
+	}
 
 	// hostPathSymlink doesn't support readOnly volume
 	if readOnly {
@@ -1134,7 +1156,9 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	gv, ok := e2evolume.(*gcePdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 			PDName: gv.volumeName,
@@ -1149,7 +1173,9 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume st
 
 func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gv, ok := e2evolume.(*gcePdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 			PDName: gv.volumeName,
@@ -1268,7 +1294,9 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern storageframework.TestPattern
 
 func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	vsv, ok := e2evolume.(*vSphereVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the cSphere test volume", e2evolume)
+	}
 
 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1288,7 +1316,9 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
 
 func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	vsv, ok := e2evolume.(*vSphereVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the vSphere test volume", e2evolume)
+	}
 
 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1407,7 +1437,9 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*azureDiskVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
 	kind := v1.AzureManagedDisk
@@ -1427,7 +1459,9 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*azureDiskVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 
 	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
@@ -1552,7 +1586,9 @@ func (a *awsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*awsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the AWS test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: av.volumeName,
@@ -1567,7 +1603,9 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*awsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the AWS test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: av.volumeName,
@@ -1808,7 +1846,9 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
 
 func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	lv, ok := e2evolume.(*localVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the local test volume", e2evolume)
+	}
 	return &v1.PersistentVolumeSource{
 		Local: &v1.LocalVolumeSource{
 			Path: lv.ltr.Path,
@@ -1909,7 +1949,9 @@ func (a *azureFileDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (a *azureFileDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*azureFileVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		AzureFile: &v1.AzureFileVolumeSource{
 			SecretName: av.secretName,
@@ -1922,7 +1964,9 @@ func (a *azureFileDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (a *azureFileDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*azureFileVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		AzureFile: &v1.AzureFilePersistentVolumeSource{
 			SecretName: av.secretName,
test/e2e/storage/external/external.go (vendored, 4 changes)
@@ -293,7 +293,9 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storagefra
 	framework.ExpectNoError(err, "patch items")
 
 	sc, ok = items[0].(*storagev1.StorageClass)
-	framework.ExpectEqual(ok, true, "storage class from %s", d.StorageClass.FromFile)
+	if !ok {
+		framework.Failf("storage class from %s", d.StorageClass.FromFile)
+	}
 }
 
 framework.ExpectNotEqual(sc, nil, "storage class is unexpectantly nil")
@@ -99,7 +99,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 	fmt.Printf("storage class creation error: %v\n", err)
 }
 framework.ExpectNoError(err, "Error creating resizable storage class")
-framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
+if !*resizableSc.AllowVolumeExpansion {
+	framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
+}
 
 pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 	StorageClassName: &(resizableSc.Name),
@@ -94,7 +94,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 	fmt.Printf("storage class creation error: %v\n", err)
 }
 framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
-framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
+if !*resizableSc.AllowVolumeExpansion {
+	framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
+}
 
 pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 	StorageClassName: &(resizableSc.Name),
@@ -90,7 +90,9 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 }
 
 sc, cleanStorageClass = testsuites.SetupStorageClass(c, newStorageClass(test, ns, "resizing"))
-framework.ExpectEqual(*sc.AllowVolumeExpansion, true)
+if !*sc.AllowVolumeExpansion {
+	framework.Failf("Class %s does not allow volume expansion", sc.Name)
+}
 
 pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 	ClaimSize: test.ClaimSize,
@@ -835,7 +835,9 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
 vols := []*localTestVolume{}
 for i := 0; i < count; i++ {
 	ltrType, ok := setupLocalVolumeMap[localVolumeType]
-	framework.ExpectEqual(ok, true)
+	if !ok {
+		framework.Failf("Invalid localVolumeType: %v", localVolumeType)
+	}
 	ltr := config.ltrMgr.Create(node, ltrType, nil)
 	vols = append(vols, &localTestVolume{
 		ltr: ltr,
@@ -229,7 +229,9 @@ func testZonalFailover(c clientset.Interface, ns string) {
 err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
 if err != nil {
 	pod := getPod(c, ns, regionalPDLabels)
-	framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+	if !podutil.IsPodReadyConditionTrue(pod.Status) {
+		framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions)
+	}
 	framework.ExpectNoError(err)
 }
 
@@ -279,7 +281,9 @@ func testZonalFailover(c clientset.Interface, ns string) {
 err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
 if err != nil {
 	pod := getPod(c, ns, regionalPDLabels)
-	framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+	if !podutil.IsPodReadyConditionTrue(pod.Status) {
+		framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions)
+	}
 	framework.ExpectNoError(err)
 }
 
@@ -701,7 +701,9 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
 			break
 		}
 	}
-	framework.ExpectEqual(found, true)
+	if !found {
+		framework.Failf("Actual access modes %v are not in claim's access mode", pv.Spec.AccessModes)
+	}
 }
 
 framework.ExpectEqual(pv.Spec.ClaimRef.Name, claim.ObjectMeta.Name)
@@ -19,13 +19,14 @@ package testsuites
 import (
 	"context"
 	"fmt"
-	"github.com/davecgh/go-spew/spew"
 	"sync"
 	"time"
 
+	"github.com/davecgh/go-spew/spew"
+
 	"github.com/onsi/ginkgo/v2"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -219,7 +220,9 @@ func createPerformanceStats(stats *performanceStats, provisionCount int, pvcs []
 var min, max, sum time.Duration
 for _, pvc := range pvcs {
 	pvcMetric, ok := stats.perObjectInterval[pvc.Name]
-	framework.ExpectEqual(ok, true)
+	if !ok {
+		framework.Failf("PVC %s not found in perObjectInterval", pvc.Name)
+	}
 
 	elapsedTime := pvcMetric.elapsed
 	sum += elapsedTime
@@ -271,7 +274,9 @@ func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *perform
 // Check if PVC entered the bound state
 if oldPVC.Status.Phase != v1.ClaimBound && newPVC.Status.Phase == v1.ClaimBound {
 	newPVCInterval, ok := pvcMetrics.perObjectInterval[newPVC.Name]
-	framework.ExpectEqual(ok, true, "PVC %s should exist in interval map already", newPVC.Name)
+	if !ok {
+		framework.Failf("PVC %s should exist in interval map already", newPVC.Name)
+	}
 	count++
 	newPVCInterval.enterDesiredState = now
 	newPVCInterval.elapsed = now.Sub(newPVCInterval.create)
@@ -299,9 +304,13 @@ func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *perform
 cache.ResourceEventHandlerFuncs{
 	UpdateFunc: func(oldObj, newObj interface{}) {
 		oldPVC, ok := oldObj.(*v1.PersistentVolumeClaim)
-		framework.ExpectEqual(ok, true)
+		if !ok {
+			framework.Failf("Expected a PVC, got instead an old object of type %T", oldObj)
+		}
 		newPVC, ok := newObj.(*v1.PersistentVolumeClaim)
-		framework.ExpectEqual(ok, true)
+		if !ok {
+			framework.Failf("Expected a PVC, got instead a new object of type %T", newObj)
+		}
 
 		checkPVCBound(oldPVC, newPVC)
 	},
@@ -152,7 +152,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 			break
 		}
 	}
-	framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
+	if !isPidChanged {
+		framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
+	}
+
 	framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
 	time.Sleep(30 * time.Second)
 }
@@ -264,7 +264,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 for _, key := range volumeStatKeys {
 	kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
 	found := findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics)
-	framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+	if !found {
+		framework.Failf("PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+	}
 }
 
 framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -328,7 +330,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 for _, key := range volumeStatKeys {
 	kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
 	found := findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics)
-	framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+	if !found {
+		framework.Failf("PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+	}
 }
 
 framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -428,7 +432,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 // Forced detach metric should be present
 forceDetachKey := "attachdetach_controller_forced_detaches"
 _, ok := updatedControllerMetrics[forceDetachKey]
-framework.ExpectEqual(ok, true, "Key %q not found in A/D Controller metrics", forceDetachKey)
+if !ok {
+	framework.Failf("Key %q not found in A/D Controller metrics", forceDetachKey)
+}
 
 // Wait and validate
 totalVolumesKey := "attachdetach_controller_total_volumes"
@@ -715,10 +721,14 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
 
 	newLatencyCount, ok := newMetrics.latencyMetrics[metricName]
 	if !expectFailure {
-		framework.ExpectEqual(ok, true, "Error getting updated latency metrics for %s", metricName)
+		if !ok {
+			framework.Failf("Error getting updated latency metrics for %s", metricName)
+		}
 	}
 	newStatusCounts, ok := newMetrics.statusMetrics[metricName]
-	framework.ExpectEqual(ok, true, "Error getting updated status metrics for %s", metricName)
+	if !ok {
+		framework.Failf("Error getting updated status metrics for %s", metricName)
+	}
 
 	newStatusCount := int64(0)
 	if expectFailure {
@@ -108,7 +108,9 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
 	ginkgo.By("Verify disk should be attached to the node")
 	isAttached, err := diskIsAttached(volumePath, node)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(isAttached, true, "disk is not attached with the node")
+	if !isAttached {
+		framework.Failf("Disk %s is not attached with the node", volumePath)
+	}
 })
 
 ginkgo.AfterEach(func() {
@@ -131,7 +131,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 ginkgo.By("Verify the volume is attached to the node")
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 framework.ExpectNoError(verifyDiskAttachedError)
-framework.ExpectEqual(isVolumeAttached, true)
+if !isVolumeAttached {
+	framework.Failf("Disk %s is not attached with the node %s", pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+}
 
 ginkgo.By("Verify the volume is accessible and available in the pod")
 verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
@@ -86,8 +86,12 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
 
 numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
-framework.ExpectNotEqual(numberOfInstances > 5, true, "Maximum allowed instances are 5")
-framework.ExpectNotEqual(numberOfInstances > volumeCount, true, "Number of instances should be less than the total volume count")
+if numberOfInstances > 5 {
+	framework.Failf("Maximum 5 instances allowed, got instead: %v", numberOfInstances)
+}
+if numberOfInstances > volumeCount {
+	framework.Failf("Number of instances: %v cannot be greater than volume count: %v", numberOfInstances, volumeCount)
+}
 
 policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
@@ -113,7 +113,9 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 for _, sspod := range ssPodsBeforeScaleDown.Items {
 	_, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{})
 	if err != nil {
-		framework.ExpectEqual(apierrors.IsNotFound(err), true)
+		if !apierrors.IsNotFound(err) {
+			framework.Failf("Error in getting Pod %s: %v", sspod.Name, err)
+		}
 		for _, volumespec := range sspod.Spec.Volumes {
 			if volumespec.PersistentVolumeClaim != nil {
 				vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@@ -146,9 +148,13 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 		vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
 		framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 		// Verify scale up has re-attached the same volumes and not introduced new volume
-		framework.ExpectEqual(volumesBeforeScaleDown[vSpherediskPath] == "", false)
+		if volumesBeforeScaleDown[vSpherediskPath] == "" {
+			framework.Failf("Volume: %q was not attached to the Node: %q before scale down", vSpherediskPath, sspod.Spec.NodeName)
+		}
 		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
-		framework.ExpectEqual(isVolumeAttached, true)
+		if !isVolumeAttached {
+			framework.Failf("Volume: %q is not attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
+		}
 		framework.ExpectNoError(verifyDiskAttachedError)
 	}
 }
@@ -72,11 +72,17 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 // Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
 // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc.
 instances = GetAndExpectIntEnvVar(VCPStressInstances)
-framework.ExpectEqual(instances <= volumesPerNode*len(nodeList.Items), true, fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
-framework.ExpectEqual(instances > len(scNames), true, "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
+if instances > volumesPerNode*len(nodeList.Items) {
+	framework.Failf("Number of Instances should be less or equal: %v, got instead %v", volumesPerNode*len(nodeList.Items), instances)
+}
+if instances <= len(scNames) {
+	framework.Failf("VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes, got instead %v", instances)
+}
 
 iterations = GetAndExpectIntEnvVar(VCPStressIterations)
-framework.ExpectEqual(iterations > 0, true, "VCP_STRESS_ITERATIONS should be greater than 0")
+if iterations <= 0 {
+	framework.Failf("VCP_STRESS_ITERATIONS should be greater than 0, got instead %v", iterations)
+}
 
 policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
@@ -160,7 +166,9 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 
 ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-framework.ExpectEqual(isVolumeAttached, true)
+if !isVolumeAttached {
+	framework.Failf("Volume: %s is not attached to the node: %v", persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+}
 framework.ExpectNoError(verifyDiskAttachedError)
 
 ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
@@ -378,7 +378,9 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
 // Verify disks are attached to the node
 isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
 framework.ExpectNoError(err)
-framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
+if !isAttached {
+	framework.Failf("disk %v is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName)
+}
 // Verify Volumes are accessible
 filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
 _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
@@ -788,7 +790,9 @@ func invokeVCenterServiceControl(command, service, host string) error {
 func expectVolumeToBeAttached(nodeName, volumePath string) {
 	isAttached, err := diskIsAttached(volumePath, nodeName)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+	if !isAttached {
+		framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
+	}
 }
 
 // expectVolumesToBeAttached checks if the given Volumes are attached to the
@@ -152,7 +152,9 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
 
 isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
-framework.ExpectEqual(isAttached, true)
+if !isAttached {
+	framework.Failf("Volume: %s is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName)
+}
 framework.ExpectNoError(err)
 
 ginkgo.By("Verify Disk Format")
@@ -198,7 +200,9 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
 	}
 }
 
-framework.ExpectEqual(diskFound, true, "Failed to find disk")
+if !diskFound {
+	framework.Failf("Failed to find disk: %s", pvVolumePath)
+}
 isDiskFormatCorrect := false
 if diskFormat == "eagerzeroedthick" {
 	if eagerlyScrub == true && thinProvisioned == false {
@@ -149,7 +149,9 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
 			isFound = true
 		}
 	}
-	framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
+	if !isFound {
+		framework.Failf("Unable to verify MountVolume.MountDevice failure for volume %s", persistentvolumes[0].Name)
+	}
 }
 
 func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
@@ -55,7 +55,9 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 ginkgo.By("Get total Ready nodes")
 nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 framework.ExpectNoError(err)
-framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
+if len(nodeList.Items) < 2 {
+	framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items))
+}
 
 totalNodesCount := len(nodeList.Items)
 nodeVM := nodeList.Items[0]
@@ -61,7 +61,9 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
+	if len(nodeList.Items) < 2 {
+		framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items))
+	}
 })
 
 /*
@@ -113,7 +115,9 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
 isAttached, err := diskIsAttached(volumePath, node1)
 framework.ExpectNoError(err)
-framework.ExpectEqual(isAttached, true, "Disk is not attached to the node")
+if !isAttached {
+	framework.Failf("Volume: %s is not attached to the node: %v", volumePath, node1)
+}
 
 ginkgo.By(fmt.Sprintf("Power off the node: %v", node1))
 
@@ -374,7 +374,9 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
 	for _, volumePath := range volumePaths {
 		isAttached, err := diskIsAttached(volumePath, nodeName)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isAttached, true, "disk:"+volumePath+" is not attached with the node")
+		if !isAttached {
+			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
+		}
 	}
 	return pod
 }
@@ -341,7 +341,9 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(controlPlaneNode)
 	isVMPresentFlag, err := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(isVMPresentFlag, false, errorMsg)
+	if isVMPresentFlag {
+		framework.Failf("VM with name %s is present, %s", dummyVMFullName, errorMsg)
+	}
 }
 
 func getControlPlaneNode(client clientset.Interface) (string, error) {