Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 23:37:01 +00:00)
Merge pull request #120306 from Rei1010/nodeClean
e2e_node: stop using deprecated framework.ExpectError
Commit 8aeebda818
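The change is mechanical: every call to the deprecated framework.ExpectError(err, explain...) becomes a direct gomega assertion that the error is non-nil. Below is a minimal standalone sketch of the replacement pattern; the test name and scaffolding (gomega.NewWithT in a plain Go test) are illustrative assumptions, while the e2e suites themselves run under Ginkgo and call the package-level gomega.Expect, as the hunks that follow show.

package example

import (
	"errors"
	"testing"

	"github.com/onsi/gomega"
)

// TestExpectErrorMigration sketches the rewrite this PR performs:
// the deprecated framework.ExpectError(err, explain...) becomes a
// direct gomega assertion that err is non-nil.
func TestExpectErrorMigration(t *testing.T) {
	g := gomega.NewWithT(t)

	err := errors.New("delete failed") // stand-in for an error returned by the code under test

	// Before: framework.ExpectError(err, "optional explanation")
	// After:
	g.Expect(err).To(gomega.HaveOccurred(), "optional explanation")
}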
@@ -26,6 +26,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -438,7 +439,7 @@ var _ = SIGDescribe("kubelet", func() {
 			ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
 			err := e2epod.DeletePodWithWait(ctx, c, pod)
-			framework.ExpectError(err)
+			gomega.Expect(err).To(gomega.HaveOccurred())
 			// pod object is now stale, but is intentionally not nil
 
 			ginkgo.By("Check if pod's host has been cleaned up -- expect not")
@@ -182,7 +182,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 				gomega.Expect(stdout).To(gomega.Equal(mountName), msg)
 			} else {
 				// We *expect* cat to return error here
-				framework.ExpectError(err, msg)
+				gomega.Expect(err).To(gomega.HaveOccurred(), msg)
 			}
 		}
 	}
@@ -1348,7 +1348,7 @@ func doPodResizeResourceQuotaTests() {
 		ginkgo.By("patching pod for resize with memory exceeding resource quota")
 		_, pErrExceedMemory := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(context.TODO(),
 			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedMemory), metav1.PatchOptions{})
-		framework.ExpectError(pErrExceedMemory, "exceeded quota: %s, requested: memory=350Mi, used: memory=700Mi, limited: memory=800Mi",
+		gomega.Expect(pErrExceedMemory).To(gomega.HaveOccurred(), "exceeded quota: %s, requested: memory=350Mi, used: memory=700Mi, limited: memory=800Mi",
 			resourceQuota.Name)
 
 		ginkgo.By("verifying pod patched for resize exceeding memory resource quota remains unchanged")
@@ -1360,7 +1360,7 @@ func doPodResizeResourceQuotaTests() {
 		ginkgo.By(fmt.Sprintf("patching pod %s for resize with CPU exceeding resource quota", resizedPod.Name))
 		_, pErrExceedCPU := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(context.TODO(),
 			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedCPU), metav1.PatchOptions{})
-		framework.ExpectError(pErrExceedCPU, "exceeded quota: %s, requested: cpu=200m, used: cpu=700m, limited: cpu=800m",
+		gomega.Expect(pErrExceedCPU).To(gomega.HaveOccurred(), "exceeded quota: %s, requested: cpu=200m, used: cpu=700m, limited: cpu=800m",
 			resourceQuota.Name)
 
 		ginkgo.By("verifying pod patched for resize exceeding CPU resource quota remains unchanged")
@@ -1447,7 +1447,7 @@ func doPodResizeErrorTests() {
 			if tc.patchError == "" {
 				framework.ExpectNoError(pErr, "failed to patch pod for resize")
 			} else {
-				framework.ExpectError(pErr, tc.patchError)
+				gomega.Expect(pErr).To(gomega.HaveOccurred(), tc.patchError)
 				patchedPod = newPod
 			}
 
@@ -322,6 +322,6 @@ func testPodSELinuxLabeling(ctx context.Context, f *framework.Framework, hostIPC
 	isEnforced, err := tk.ReadFileViaContainer(pod.Name, "test-container", "/sys/fs/selinux/enforce")
 	if err == nil && isEnforced == "1" {
 		_, err = tk.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
-		framework.ExpectError(err, "expecting SELinux to not let the container with different MCS label to read the file")
+		gomega.Expect(err).To(gomega.HaveOccurred(), "expecting SELinux to not let the container with different MCS label to read the file")
 	}
 }
@@ -577,7 +577,7 @@ func podresourcesGetTests(ctx context.Context, f *framework.Framework, cli kubel
 	expected := []podDesc{}
 	resp, err := cli.Get(ctx, &kubeletpodresourcesv1.GetPodResourcesRequest{PodName: "test", PodNamespace: f.Namespace.Name})
 	podResourceList := []*kubeletpodresourcesv1.PodResources{resp.GetPodResources()}
-	framework.ExpectError(err, "pod not found")
+	gomega.Expect(err).To(gomega.HaveOccurred(), "pod not found")
 	res := convertToMap(podResourceList)
 	err = matchPodDescWithResources(expected, res)
 	framework.ExpectNoError(err, "matchPodDescWithResources() failed err %v", err)
@@ -788,7 +788,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			ginkgo.By("checking Get fail if the feature gate is not enabled")
 			getRes, err := cli.Get(ctx, &kubeletpodresourcesv1.GetPodResourcesRequest{PodName: "test", PodNamespace: f.Namespace.Name})
 			framework.Logf("Get result: %v, err: %v", getRes, err)
-			framework.ExpectError(err, "With feature gate disabled, the call must fail")
+			gomega.Expect(err).To(gomega.HaveOccurred(), "With feature gate disabled, the call must fail")
 		})
 	})
 })