Use framework.ExpectNoError() for e2e/node
commit f2a7650ede
parent b066e0d783
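The change is mechanical: direct gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) assertions in the e2e/node tests are replaced with the framework.ExpectNoError(err, ...) helper, and expression-style assertions wrapped around framework.RunRC and wait.Poll are unwound into an explicit err value first. For orientation, here is a minimal sketch of what the helper does, assuming the upstream test/e2e/framework behavior at the time (the real helper also logs and tracks a caller offset); the sketch is not part of this diff:

package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the running spec when err is non-nil, forwarding the
// optional explanation arguments to the failure message.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.Expect(err).NotTo(gomega.HaveOccurred(), explain...)
}

With that helper, a call such as framework.ExpectNoError(err, "failed to query for pod") has the same effect as the gomega form it replaces, while keeping call sites shorter and failure reporting consistent across the suite.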
@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
 		},
 	}
 	rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	err = f.WaitForPodReady(rtnPod.Name) // running & ready
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	return rtnPod
 }

@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 	mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
 	// use ip rather than hostname in GCE
 	nodeIP, err := framework.GetHostExternalAddress(c, pod)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	condMsg := "deleted"
 	if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 		e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 		err = wait.Poll(poll, timeout, func() (bool, error) {
 			result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(err)
 			e2essh.LogResult(result)
 			ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
 			if expectClean && ok { // keep trying
@@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 			}
 			return true, nil // done, host is as expected
 		})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
+		framework.ExpectNoError(err, fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
 	}

 	if expectClean {
@@ -317,20 +317,21 @@ var _ = SIGDescribe("kubelet", func() {
 			ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 			rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))

-			gomega.Expect(framework.RunRC(testutils.RCConfig{
+			err := framework.RunRC(testutils.RCConfig{
 				Client:       f.ClientSet,
 				Name:         rcName,
 				Namespace:    f.Namespace.Name,
 				Image:        imageutils.GetPauseImageName(),
 				Replicas:     totalPods,
 				NodeSelector: nodeLabels,
-			})).NotTo(gomega.HaveOccurred())
+			})
+			framework.ExpectNoError(err)
 			// Perform a sanity check so that we know all desired pods are
 			// running on the nodes according to kubelet. The timeout is set to
 			// only 30 seconds here because framework.RunRC already waited for all pods to
 			// transition to the running status.
-			gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
-				time.Second*30)).NotTo(gomega.HaveOccurred())
+			err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
+			framework.ExpectNoError(err)
 			if resourceMonitor != nil {
 				resourceMonitor.LogLatest()
 			}
@@ -345,8 +346,8 @@ var _ = SIGDescribe("kubelet", func() {
 			// - a bug in graceful termination (if it is enabled)
 			// - docker slow to delete pods (or resource problems causing slowness)
 			start := time.Now()
-			gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
-				itArg.timeout)).NotTo(gomega.HaveOccurred())
+			err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
+			framework.ExpectNoError(err)
 			e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 				time.Since(start))
 			if resourceMonitor != nil {
@@ -396,9 +397,9 @@ var _ = SIGDescribe("kubelet", func() {

 		ginkgo.AfterEach(func() {
 			err := framework.DeletePodWithWait(f, c, pod)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
+			framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name)
 			err = framework.DeletePodWithWait(f, c, nfsServerPod)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
+			framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
 		})

 		// execute It blocks from above table of tests
@@ -31,7 +31,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )

 const (
@@ -70,13 +69,14 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

 	// TODO: Use a more realistic workload
-	gomega.Expect(framework.RunRC(testutils.RCConfig{
+	err := framework.RunRC(testutils.RCConfig{
 		Client:    f.ClientSet,
 		Name:      rcName,
 		Namespace: f.Namespace.Name,
 		Image:     imageutils.GetPauseImageName(),
 		Replicas:  totalPods,
-	})).NotTo(gomega.HaveOccurred())
+	})
+	framework.ExpectNoError(err)

 	// Log once and flush the stats.
 	rm.LogLatest()
@@ -103,7 +103,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	ginkgo.By("Reporting overall resource usage")
 	logPodsOnNodes(f.ClientSet, nodeNames.List())
 	usageSummary, err := rm.GetLatest()
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	// TODO(random-liu): Remove the original log when we migrate to new perfdash
 	e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 	// Log perf result
@@ -79,7 +79,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 			selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 			options := metav1.ListOptions{LabelSelector: selector.String()}
 			pods, err := podClient.List(options)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+			framework.ExpectNoError(err, "failed to query for pod")
 			gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 			options = metav1.ListOptions{
 				LabelSelector: selector.String(),
@@ -93,7 +93,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 			selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 			options = metav1.ListOptions{LabelSelector: selector.String()}
 			pods, err = podClient.List(options)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+			framework.ExpectNoError(err, "failed to query for pod")
 			gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

 			// We need to wait for the pod to be running, otherwise the deletion
@@ -101,25 +101,25 @@ var _ = SIGDescribe("Pods Extended", func() {
 			framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 			// save the running pod
 			pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+			framework.ExpectNoError(err, "failed to GET scheduled pod")

 			// start local proxy, so we can send graceful deletion over query string, rather than body parameter
 			cmd := framework.KubectlCmd("proxy", "-p", "0")
 			stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to start up proxy")
+			framework.ExpectNoError(err, "failed to start up proxy")
 			defer stdout.Close()
 			defer stderr.Close()
 			defer framework.TryKill(cmd)
 			buf := make([]byte, 128)
 			var n int
 			n, err = stdout.Read(buf)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from kubectl proxy stdout")
+			framework.ExpectNoError(err, "failed to read from kubectl proxy stdout")
 			output := string(buf[:n])
 			proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 			match := proxyRegexp.FindStringSubmatch(output)
 			gomega.Expect(len(match)).To(gomega.Equal(2))
 			port, err := strconv.Atoi(match[1])
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to convert port into string")
+			framework.ExpectNoError(err, "failed to convert port into string")

 			endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
 			tr := &http.Transport{
@@ -127,21 +127,21 @@ var _ = SIGDescribe("Pods Extended", func() {
 			}
 			client := &http.Client{Transport: tr}
 			req, err := http.NewRequest("DELETE", endpoint, nil)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create http request")
+			framework.ExpectNoError(err, "failed to create http request")

 			ginkgo.By("deleting the pod gracefully")
 			rsp, err := client.Do(req)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to use http client to send delete")
+			framework.ExpectNoError(err, "failed to use http client to send delete")
 			gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
 			var lastPod v1.Pod
 			err = json.NewDecoder(rsp.Body).Decode(&lastPod)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to decode graceful termination proxy response")
+			framework.ExpectNoError(err, "failed to decode graceful termination proxy response")

 			defer rsp.Body.Close()

 			ginkgo.By("verifying the kubelet observed the termination notice")

-			gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+			err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 				podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 				if err != nil {
 					e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
@@ -159,7 +159,8 @@ var _ = SIGDescribe("Pods Extended", func() {
 				}
 				e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 				return true, nil
-			})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
+			})
+			framework.ExpectNoError(err, "kubelet never observed the termination notice")

 			gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
 			gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
@@ -167,7 +168,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 			selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 			options = metav1.ListOptions{LabelSelector: selector.String()}
 			pods, err = podClient.List(options)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+			framework.ExpectNoError(err, "failed to query for pods")
 			gomega.Expect(len(pods.Items)).To(gomega.Equal(0))

 		})
@@ -218,7 +219,7 @@ var _ = SIGDescribe("Pods Extended", func() {

 			ginkgo.By("verifying QOS class is set on the pod")
 			pod, err := podClient.Get(name, metav1.GetOptions{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+			framework.ExpectNoError(err, "failed to query for pod")
 			gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
 		})
 	})
@@ -33,7 +33,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )

 // State partially cloned from webserver.go
@@ -192,11 +191,11 @@ var _ = SIGDescribe("PreStop", func() {

 		var err error
 		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+		framework.ExpectNoError(err, "failed to GET scheduled pod")

 		ginkgo.By("deleting the pod gracefully")
 		err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
+		framework.ExpectNoError(err, "failed to delete pod")

 		//wait up to graceful termination period seconds
 		time.Sleep(30 * time.Second)
@@ -205,9 +204,9 @@ var _ = SIGDescribe("PreStop", func() {
 		result := &v1.PodList{}
 		err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
 			client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the pods of the node")
+			framework.ExpectNoError(err, "failed to get the pods of the node")
 			err = client.Into(result)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to parse the pods of the node")
+			framework.ExpectNoError(err, "failed to parse the pods of the node")

 			for _, kubeletPod := range result.Items {
 				if pod.Name != kubeletPod.Name {
@@ -218,7 +218,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
 	gomega.Expect(content).To(gomega.ContainSubstring(testContent))

 	foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	// Confirm that the file can be accessed from a second
 	// pod using host_path with the same MCS label
@@ -50,11 +50,11 @@ func cleanupJob(f *framework.Framework, job *batch.Job) {
 		j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
 	}
 	_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)

 	err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 }

 func testFinishedJob(f *framework.Framework) {
@@ -73,19 +73,19 @@ func testFinishedJob(f *framework.Framework) {

 	e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
 	job, err := jobutil.CreateJob(c, ns, job)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	e2elog.Logf("Wait for the Job to finish")
 	err = jobutil.WaitForJobFinish(c, ns, job.Name)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	e2elog.Logf("Wait for TTL after finished controller to delete the Job")
 	err = jobutil.WaitForJobDeleting(c, ns, job.Name)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)

 	e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
 	job, err = jobutil.GetJob(c, ns, job.Name)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	finishTime := jobutil.FinishTime(job)
 	finishTimeUTC := finishTime.UTC()
 	gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue())