Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
replace test error checking with more readable way
This commit is contained in:
parent 1058877fbf
commit 629ec7e113
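Every hunk below makes the same one-line swap: the bare gomega assertion on an expected failure becomes the e2e framework helper, so the negative path reads the same way as the existing framework.ExpectNoError calls on the success path. A minimal sketch of what that helper is assumed to wrap (a hypothetical reconstruction, not the actual test/e2e/framework source):

// Hypothetical reconstruction of the helper used by this commit; the real
// implementation lives in k8s.io/kubernetes/test/e2e/framework.
package framework

import "github.com/onsi/gomega"

// ExpectError asserts that err is non-nil, reading as "this call must fail".
func ExpectError(err error, explain ...interface{}) {
	gomega.Expect(err).To(gomega.HaveOccurred(), explain...)
}

With such a helper, each call site shrinks from gomega.Expect(err).To(gomega.HaveOccurred()) to framework.ExpectError(err), which is the single change repeated in every hunk of this commit.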
@@ -313,9 +313,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 ginkgo.By("Creating local PVC and PV")
 createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
 pod, err := createLocalPod(config, testVol, nil)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
 })

@@ -332,7 +332,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 framework.ExpectNoError(err)

 err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 cleanupLocalVolumes(config, []*localTestVolume{testVol})
 })

@@ -932,7 +932,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
 }
 return false, nil
 })
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 }
 }

@@ -193,7 +193,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 defer func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 }()
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 })
 } else {
 ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {

@@ -251,7 +251,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 framework.ExpectNoError(err)

 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 })
 } else {
 ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {

@@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)

 err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))

@@ -573,7 +573,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

 // The claim should timeout phase:Pending
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 e2elog.Logf(err.Error())
 })

@@ -800,7 +800,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

 // The claim should timeout phase:Pending
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 e2elog.Logf(err.Error())
 claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

@@ -834,7 +834,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

 // The claim should timeout phase:Pending
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 e2elog.Logf(err.Error())
 claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

@@ -22,7 +22,6 @@ import (
 "time"

 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"

@@ -69,7 +68,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
 scParameters[Datastore] = InvalidDatastore
 scParameters[DiskFormat] = ThinDisk
 err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found`
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -90,7 +89,7 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string,

 ginkgo.By("Expect claim to fail provisioning volume")
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
 return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)

@@ -126,7 +126,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
 pvclaims = append(pvclaims, pvclaim)
 // Create pod to attach Volume to Node
 pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})

@@ -159,7 +159,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
 e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
 e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "Invalid value for " + Policy_DiskStripes + "."
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
 e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -204,7 +204,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[Datastore] = VmfsDatastore
 e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
 "The policy parameters will work only with VSAN Datastore."
 if !strings.Contains(err.Error(), errorMsg) {

@@ -236,7 +236,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[DiskFormat] = ThinDisk
 e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -249,7 +249,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[DiskFormat] = ThinDisk
 e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 scParameters[DiskFormat] = ThinDisk
 e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
 err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -317,7 +317,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc

 ginkgo.By("Waiting for claim to be in bound phase")
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
 return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)

@@ -337,7 +337,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
 pvclaims = append(pvclaims, pvclaim)
 ginkgo.By("Expect claim to fail provisioning volume")
 _, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

@@ -132,7 +132,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
 zones = append(zones, zoneD)
 err := verifyPVCCreationFails(client, namespace, nil, zones)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)
 errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
 if !strings.Contains(err.Error(), errorMsg) {
 framework.ExpectNoError(err, errorMsg)

@@ -357,7 +357,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara

 ginkgo.By("Waiting for claim to be in bound phase")
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
-gomega.Expect(err).To(gomega.HaveOccurred())
+framework.ExpectError(err)

 eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
 e2elog.Logf("Failure message : %+q", eventList.Items[0].Message)

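All of the negative tests touched above share one shape: trigger an operation that must fail, assert the failure with framework.ExpectError, then check the error text. A condensed, self-contained sketch of that shape (provisionWithInvalidPolicy and its error text are illustrative stand-ins for the invokeInvalidPolicyTestNeg calls and messages in the diff):

package storage

import (
	"errors"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// provisionWithInvalidPolicy is an illustrative stand-in for helpers such as
// invokeInvalidPolicyTestNeg; in the real tests it returns the provisioning error.
func provisionWithInvalidPolicy() error {
	return errors.New("invalid option \"objectSpaceReserve\" for volume plugin kubernetes.io/vsphere-volume")
}

func verifyProvisioningFails() {
	err := provisionWithInvalidPolicy()
	framework.ExpectError(err) // the provisioning attempt must fail

	errorMsg := "invalid option"
	if !strings.Contains(err.Error(), errorMsg) {
		// Failed for an unexpected reason; report the actual error verbatim.
		framework.ExpectNoError(err, errorMsg)
	}
}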