Merge pull request #85770 from tanjunchen/ExpectExpect-test-e2e-storage

use ExpectEqual of framework in test/e2e/storage
Kubernetes Prow Robot 2019-12-03 21:50:56 -08:00 committed by GitHub
commit 99e4f6529f
19 changed files with 34 additions and 39 deletions
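The change is mechanical across all 19 files: boolean assertions written as gomega matchers are replaced by the e2e framework's ExpectEqual helper, and files that only imported gomega for this pattern drop the import. Below is a minimal sketch of the before/after pattern, using a hypothetical helper and variable names rather than code taken from the diff:

package storage // hypothetical package, for illustration only

import (
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// assertDiskAttached sketches the substitution applied throughout this PR.
func assertDiskAttached(isAttached bool, volumePath string) {
	// Before: the boolean check goes through a gomega matcher.
	gomega.Expect(isAttached).To(gomega.BeTrue(), "disk %s is not attached to the node", volumePath)

	// After: the framework wrapper expresses the same assertion, so the call
	// site no longer needs a direct gomega dependency.
	framework.ExpectEqual(isAttached, true, "disk %s is not attached to the node", volumePath)
}

The framework helper essentially delegates to a gomega equality assertion internally, so failure reporting stays comparable while the test files avoid matcher boilerplate.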


@@ -20,7 +20,6 @@ go_library(
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)


@@ -37,7 +37,6 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// List of testSuites to be executed for each external driver.
@@ -280,7 +279,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.Pe
framework.ExpectNoError(err, "patch items")
sc, ok := items[0].(*storagev1.StorageClass)
-gomega.Expect(ok).To(gomega.BeTrue(), "storage class from %s", d.StorageClass.FromFile)
+framework.ExpectEqual(ok, true, "storage class from %s", d.StorageClass.FromFile)
// Ensure that we can load more than once as required for
// GetDynamicProvisionStorageClass by adding a random suffix.
sc.Name = names.SimpleNameGenerator.GenerateName(sc.Name + "-")


@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
fmt.Printf("storage class creation error: %v\n", err)
}
framework.ExpectNoError(err, "Error creating resizable storage class")
-gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
+framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
StorageClassName: &(resizableSc.Name),


@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
fmt.Printf("storage class creation error: %v\n", err)
}
framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
-gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
+framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
StorageClassName: &(resizableSc.Name),


@@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
}
resizableSc, err = c.StorageV1().StorageClasses().Create(newStorageClass(test, ns, "resizing"))
framework.ExpectNoError(err, "Error creating resizable storage class")
-gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
+framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,


@@ -395,7 +395,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
-gomega.Expect(true, strings.Contains(string(output), string(host0Name)))
+framework.ExpectEqual(true, strings.Contains(string(output), string(host0Name)))
ginkgo.By("deleting host0")
err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
@@ -405,7 +405,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.ExpectEqual(numNodes, origNodeCnt, fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
-gomega.Expect(false, strings.Contains(string(output), string(host0Name)))
+framework.ExpectEqual(false, strings.Contains(string(output), string(host0Name)))
} else if disruptOp == deleteNodeObj {
ginkgo.By("deleting host0's node api object")


@@ -18,7 +18,6 @@ package storage
import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -125,7 +124,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
-gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
+framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
ginkgo.By("Deleting the Pod")
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
@@ -140,7 +139,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ginkgo.By("Deleting the Persistent Volume")
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
-gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
+framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
ginkgo.By("Deleting the client pod")
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)


@@ -806,7 +806,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
vols := []*localTestVolume{}
for i := 0; i < count; i++ {
ltrType, ok := setupLocalVolumeMap[localVolumeType]
-gomega.Expect(ok).To(gomega.BeTrue())
+framework.ExpectEqual(ok, true)
ltr := config.ltrMgr.Create(node, ltrType, nil)
vols = append(vols, &localTestVolume{
ltr: ltr,


@@ -324,7 +324,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
break
}
}
-gomega.Expect(found).To(gomega.BeTrue())
+framework.ExpectEqual(found, true)
}
framework.ExpectEqual(pv.Spec.ClaimRef.Name, claim.ObjectMeta.Name)


@@ -199,7 +199,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
break
}
}
-gomega.Expect(isPidChanged).To(gomega.BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
+framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}


@@ -250,7 +250,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
for _, key := range volumeStatKeys {
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics)
-gomega.Expect(found).To(gomega.BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
+framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
}
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
metricKey := "volume_operation_total_seconds_count"
dimensions := []string{"operation_name", "plugin_name"}
valid := hasValidMetrics(testutil.Metrics(controllerMetrics), metricKey, dimensions...)
-gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
+framework.ExpectEqual(valid, true, "Invalid metric in P/V Controller metrics: %q", metricKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
@@ -312,7 +312,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
totalVolumesKey := "volume_manager_total_volumes"
dimensions := []string{"state", "plugin_name"}
valid := hasValidMetrics(testutil.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
-gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
+framework.ExpectEqual(valid, true, "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
@@ -350,7 +350,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Forced detach metric should be present
forceDetachKey := "attachdetach_controller_forced_detaches"
_, ok := updatedControllerMetrics[forceDetachKey]
-gomega.Expect(ok).To(gomega.BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey)
+framework.ExpectEqual(ok, true, "Key %q not found in A/D Controller metrics", forceDetachKey)
// Wait and validate
totalVolumesKey := "attachdetach_controller_total_volumes"
@@ -588,10 +588,10 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
newLatencyCount, ok := newMetrics.latencyMetrics[metricName]
if !expectFailure {
-gomega.Expect(ok).To(gomega.BeTrue(), "Error getting updated latency metrics for %s", metricName)
+framework.ExpectEqual(ok, true, "Error getting updated latency metrics for %s", metricName)
}
newStatusCounts, ok := newMetrics.statusMetrics[metricName]
-gomega.Expect(ok).To(gomega.BeTrue(), "Error getting updated status metrics for %s", metricName)
+framework.ExpectEqual(ok, true, "Error getting updated status metrics for %s", metricName)
newStatusCount := int64(0)
if expectFailure {


@@ -20,7 +20,6 @@ import (
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -105,7 +104,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
ginkgo.By("Verify disk should be attached to the node")
isAttached, err := diskIsAttached(volumePath, node)
framework.ExpectNoError(err)
-gomega.Expect(isAttached).To(gomega.BeTrue(), "disk is not attached with the node")
+framework.ExpectEqual(isAttached, true, "disk is not attached with the node")
})
ginkgo.AfterEach(func() {


@@ -21,7 +21,6 @@ import (
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -128,7 +127,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
ginkgo.By("Verify the volume is attached to the node")
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
framework.ExpectNoError(verifyDiskAttachedError)
-gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
+framework.ExpectEqual(isVolumeAttached, true)
ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})


@@ -82,8 +82,8 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
-gomega.Expect(numberOfInstances > 5).NotTo(gomega.BeTrue(), "Maximum allowed instances are 5")
-gomega.Expect(numberOfInstances > volumeCount).NotTo(gomega.BeTrue(), "Number of instances should be less than the total volume count")
+framework.ExpectNotEqual(numberOfInstances > 5, true, "Maximum allowed instances are 5")
+framework.ExpectNotEqual(numberOfInstances > volumeCount, true, "Number of instances should be less than the total volume count")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)


@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
framework.ExpectNoError(e2esset.CheckMount(client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2esset.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
-gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
+framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
// Get the list of Volumes attached to Pods before scale down
volumesBeforeScaleDown := make(map[string]string)
@@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
if err != nil {
-gomega.Expect(apierrs.IsNotFound(err), gomega.BeTrue())
+framework.ExpectEqual(apierrs.IsNotFound(err), true)
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@@ -131,7 +131,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
ssPodsAfterScaleUp := e2esset.GetPodList(client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
-gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
+framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
// After scale up, verify all vsphere volumes are attached to node VMs.
ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
@@ -145,9 +145,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
// Verify scale up has re-attached the same volumes and not introduced new volume
-gomega.Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(gomega.BeFalse())
+framework.ExpectEqual(volumesBeforeScaleDown[vSpherediskPath] == "", false)
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
-gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
+framework.ExpectEqual(isVolumeAttached, true)
framework.ExpectNoError(verifyDiskAttachedError)
}
}


@@ -68,11 +68,11 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
// Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc.
instances = GetAndExpectIntEnvVar(VCPStressInstances)
-gomega.Expect(instances <= volumesPerNode*len(nodeList.Items)).To(gomega.BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
-gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
+framework.ExpectEqual(instances <= volumesPerNode*len(nodeList.Items), true, fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
+framework.ExpectEqual(instances > len(scNames), true, "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
iterations = GetAndExpectIntEnvVar(VCPStressIterations)
-gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
+framework.ExpectEqual(iterations > 0, true, "VCP_STRESS_ITERATIONS should be greater than 0")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
@@ -155,7 +155,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
+framework.ExpectEqual(isVolumeAttached, true)
framework.ExpectNoError(verifyDiskAttachedError)
ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))


@@ -384,7 +384,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
// Verify disks are attached to the node
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
framework.ExpectNoError(err)
-gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
+framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
@@ -795,7 +795,7 @@ func invokeVCenterServiceControl(command, service, host string) error {
func expectVolumeToBeAttached(nodeName, volumePath string) {
isAttached, err := diskIsAttached(volumePath, nodeName)
framework.ExpectNoError(err)
-gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk: %s is not attached with the node", volumePath))
}
// expectVolumesToBeAttached checks if the given Volumes are attached to the


@@ -149,7 +149,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
-gomega.Expect(isAttached).To(gomega.BeTrue())
+framework.ExpectEqual(isAttached, true)
framework.ExpectNoError(err)
ginkgo.By("Verify Disk Format")


@@ -369,7 +369,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
for _, volumePath := range volumePaths {
isAttached, err := diskIsAttached(volumePath, nodeName)
framework.ExpectNoError(err)
-gomega.Expect(isAttached).To(gomega.BeTrue(), "disk:"+volumePath+" is not attached with the node")
+framework.ExpectEqual(isAttached, true, "disk:"+volumePath+" is not attached with the node")
}
return pod
}