Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 18:31:15 +00:00)
test/e2e/: use framework.ExpectEqual() to replace gomega.Expect(...).To(gomega.BeTrue()|BeFalse())
parent 92a14f4f7e
commit f786cb07b7
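The pattern throughout this diff is mechanical: every boolean assertion of the form gomega.Expect(x).To(gomega.BeTrue()) (or BeFalse()) becomes framework.ExpectEqual(x, true) (or false). The helper lives in test/e2e/framework; a minimal sketch of its assumed shape, to make the call sites below readable:

    // Sketch only: the real helper is in test/e2e/framework/expect.go and
    // adjusts gomega's caller offset so failures point at the test, not here.
    package framework

    import "github.com/onsi/gomega"

    // ExpectEqual asserts actual == extra via gomega's Equal matcher; the
    // optional explain arguments become the failure description.
    func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
    	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
    }

The win over BeTrue() is the failure output: Equal prints both operands, while a failed BeTrue() can only report that false was not true. Other matchers (BeEquivalentTo, BeIdenticalTo, and most BeNil uses) are left untouched in this pass.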
@@ -371,26 +371,28 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 	framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
 }
 
-func verifyV1Object(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
+func verifyV1Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
 	gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v1"))
 	hostPort, exists := obj.Object["hostPort"]
-	gomega.Expect(exists).To(gomega.BeTrue())
+	framework.ExpectEqual(exists, true)
 
 	gomega.Expect(hostPort).To(gomega.BeEquivalentTo("localhost:8080"))
 	_, hostExists := obj.Object["host"]
-	gomega.Expect(hostExists).To(gomega.BeFalse())
+	framework.ExpectEqual(hostExists, false)
 	_, portExists := obj.Object["port"]
-	gomega.Expect(portExists).To(gomega.BeFalse())
+	framework.ExpectEqual(portExists, false)
 }
 
-func verifyV2Object(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
+func verifyV2Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
 	gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v2"))
 	_, hostPortExists := obj.Object["hostPort"]
-	gomega.Expect(hostPortExists).To(gomega.BeFalse())
+	framework.ExpectEqual(hostPortExists, false)
 
 	host, hostExists := obj.Object["host"]
-	gomega.Expect(hostExists).To(gomega.BeTrue())
+	framework.ExpectEqual(hostExists, true)
 	gomega.Expect(host).To(gomega.BeEquivalentTo("localhost"))
 	port, portExists := obj.Object["port"]
-	gomega.Expect(portExists).To(gomega.BeTrue())
+	framework.ExpectEqual(portExists, true)
 	gomega.Expect(port).To(gomega.BeEquivalentTo("8080"))
 }
 
@@ -413,7 +415,7 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensi
 	ginkgo.By("v2 custom resource should be converted")
 	v2crd, err := customResourceClients["v2"].Get(name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Getting v2 of custom resource %s", name)
-	verifyV2Object(f, crd, v2crd)
+	verifyV2Object(crd, v2crd)
 }
 
 func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
@@ -473,19 +475,19 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
 	list, err := customResourceClients["v1"].List(metav1.ListOptions{})
 	gomega.Expect(err).To(gomega.BeNil())
 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
-	gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
-	verifyV1Object(f, crd, &list.Items[0])
-	verifyV1Object(f, crd, &list.Items[1])
+	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true)
+	verifyV1Object(crd, &list.Items[0])
+	verifyV1Object(crd, &list.Items[1])
 
 	ginkgo.By("List CRs in v2")
 	list, err = customResourceClients["v2"].List(metav1.ListOptions{})
 	gomega.Expect(err).To(gomega.BeNil())
 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
-	gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
-	verifyV2Object(f, crd, &list.Items[0])
-	verifyV2Object(f, crd, &list.Items[1])
+	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true)
+	verifyV2Object(crd, &list.Items[0])
+	verifyV2Object(crd, &list.Items[1])
 }
 
 // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds.
@@ -995,7 +995,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
 	client := f.ClientSet
 	configMap := toBeMutatedConfigMap(f)
 	mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
-	gomega.Expect(err).To(gomega.BeNil())
+	framework.ExpectNoError(err)
 	expectedConfigMapData := map[string]string{
 		"mutation-start":   "yes",
 		"mutation-stage-1": "yes",
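Error-nil checks get a different helper: gomega.Expect(err).To(gomega.BeNil()) becomes framework.ExpectNoError(err), which also logs the error before failing. A sketch of its assumed shape:

    // Sketch only; the real ExpectNoError in test/e2e/framework additionally
    // logs the error and accepts printf-style context arguments.
    func ExpectNoError(err error, explain ...interface{}) {
    	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
    }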
@@ -25,7 +25,7 @@ import (
 
 	batchv1 "k8s.io/api/batch/v1"
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
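A note on the recurring import hunks: the bare "k8s.io/api/core/v1" import gains an explicit v1 name in four files. This looks like a goimports side effect — the tool treats a trailing v1 path element as a version suffix and therefore pins the package name explicitly (an inference; the diff only shows the result). Either spelling binds the same identifier:

    import (
    	v1 "k8s.io/api/core/v1" // explicit name; the unnamed form also binds to v1
    )

    var _ v1.Pod // call sites are unchanged either way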
@@ -214,7 +214,7 @@ var _ = SIGDescribe("CronJob", func() {
 		ginkgo.By("Ensuring job was deleted")
 		_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		framework.ExpectError(err)
-		gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(errors.IsNotFound(err), true)
 
 		ginkgo.By("Ensuring the job is not in the cronjob active list")
 		err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
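The "ensure it was deleted" sites (cronjob here; the deployment, job, and kubectl hunks below follow the same shape) all pair two assertions: expect an error, then assert it is specifically NotFound. A self-contained sketch of the pattern, with a fabricated NotFound error for illustration:

    package sketch

    import (
    	"k8s.io/apimachinery/pkg/api/errors"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // expectNotFound is a hypothetical helper; the tests inline these two calls.
    func expectNotFound(err error) {
    	framework.ExpectError(err)                          // some error occurred...
    	framework.ExpectEqual(errors.IsNotFound(err), true) // ...and it is 404-style
    }

    func example() {
    	err := errors.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, "my-job")
    	expectNotFound(err)
    }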
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -478,7 +478,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 			rollbackPods[pod.Name] = true
 		}
 		for _, pod := range existingPods {
-			gomega.Expect(rollbackPods[pod.Name]).To(gomega.BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
+			framework.ExpectEqual(rollbackPods[pod.Name], true, fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
 		}
 	})
 })
@@ -205,7 +205,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
 	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
 	_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 	framework.ExpectError(err)
-	gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+	framework.ExpectEqual(errors.IsNotFound(err), true)
 	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
 	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	framework.ExpectNoError(err)
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
@@ -162,7 +162,7 @@ var _ = SIGDescribe("Job", func() {
 		ginkgo.By("Ensuring job was deleted")
 		_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
-		gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(errors.IsNotFound(err), true)
 	})
 
 	/*
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 		}
 
 		restartCount := getRestartCount(p)
-		gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
+		framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
 	})
 
 	/*
@@ -108,7 +108,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 		framework.ExpectNotEqual(isReady, true, "pod should be not ready")
 
 		restartCount := getRestartCount(p)
-		gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
+		framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
 	})
 
 	/*
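The two probe hunks show the clearest readability win. Comparing a precomputed bool against BeTrue() hides the operands from gomega; handing the count itself to ExpectEqual lets the matcher print them. A self-contained sketch of the difference (failure texts paraphrased, not exact gomega output):

    package sketch

    import (
    	"github.com/onsi/gomega"

    	"k8s.io/kubernetes/test/e2e/framework"
    )

    func restartCountAssertions(restartCount int) {
    	// Before: gomega sees only a bool, so its own output is roughly
    	// "Expected false to be true"; the count survives only because the
    	// hand-written message interpolates it.
    	gomega.Expect(restartCount == 0).To(gomega.BeTrue(),
    		"pod should have a restart count of 0 but got %v", restartCount)

    	// After: gomega sees both operands and reports roughly
    	// "Expected <int>: 3 to equal <int>: 0", even without the message.
    	framework.ExpectEqual(restartCount, 0,
    		"pod should have a restart count of 0 but got %v", restartCount)
    }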
@@ -11,7 +11,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
     ],
 )
 
@@ -25,8 +25,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/kubernetes/pkg/kubemark"
 	"k8s.io/kubernetes/test/e2e/framework"
-
-	"github.com/onsi/gomega"
 )
 
 var (
@@ -77,7 +75,7 @@ func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
 		p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
 		framework.ExpectNoError(err)
 		externalInformerFactory.Start(p.closeChannel)
-		gomega.Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(gomega.BeTrue())
+		framework.ExpectEqual(p.controller.WaitForCacheSync(p.closeChannel), true)
 		go p.controller.Run(p.closeChannel)
 	}
 }
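With this change the kubemark provider loses its last gomega call (WaitForCacheSync returns a bool), so the Go import and, in the preceding BUILD hunk, the generated Bazel dependency both disappear — Bazel deps must mirror the source imports, and Kubernetes regenerated BUILD files with its update-bazel tooling at the time (an assumption about process; the diff shows only the result). The assertion itself, as a compilable sketch against a stand-in interface:

    package sketch

    import "k8s.io/kubernetes/test/e2e/framework"

    // cacheSyncer is a hypothetical stand-in for the kubemark controller.
    type cacheSyncer interface {
    	WaitForCacheSync(stopCh chan struct{}) bool
    }

    func requireSynced(c cacheSyncer, stopCh chan struct{}) {
    	// Before: gomega.Expect(c.WaitForCacheSync(stopCh)).To(gomega.BeTrue())
    	framework.ExpectEqual(c.WaitForCacheSync(stopCh), true)
    }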
@@ -527,7 +527,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 		ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
 		podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
 		framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
-		gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
+		framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
 	})
 	ginkgo.AfterEach(func() {
 		cleanupKubectlInputs(podYaml, ns, simplePodSelector)
@@ -1385,7 +1385,7 @@ metadata:
 		podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
 		nsFlag = fmt.Sprintf("--namespace=%v", ns)
 		framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag)
-		gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
+		framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
 	})
 	ginkgo.AfterEach(func() {
 		cleanupKubectlInputs(podYaml, ns, pausePodSelector)
@@ -1426,7 +1426,7 @@ metadata:
 		nsFlag = fmt.Sprintf("--namespace=%v", ns)
 		podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml")))
 		framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag)
-		gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
+		framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
 	})
 	ginkgo.AfterEach(func() {
 		cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
@@ -1969,7 +1969,7 @@ metadata:
 		ginkgo.By("verifying the job " + jobName + " was deleted")
 		_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
 		framework.ExpectError(err)
-		gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(apierrs.IsNotFound(err), true)
 	})
 })
 
@@ -2198,7 +2198,7 @@ var _ = SIGDescribe("Services", func() {
 		lbIngress := &svc.Status.LoadBalancer.Ingress[0]
 		svcPort := int(svc.Spec.Ports[0].Port)
 		// should have an internal IP.
-		gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeTrue())
+		framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
 
 		// ILBs are not accessible from the test orchestrator, so it's necessary to use
 		// a pod to test the service.
@@ -3060,19 +3060,19 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
 	framework.ExpectNoError(err)
 
 	if !isTransitionTest {
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
 	}
 	if isTransitionTest {
 		_, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, false), true)
 		_, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
 	}
 }
 
@@ -3110,19 +3110,19 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
 	port := int(svc.Spec.Ports[0].Port)
 
 	if !isTransitionTest {
-		gomega.Expect(checkAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
 	}
 	if isTransitionTest {
 		svc, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, false), true)
 		svc, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
 	}
 }
 
@@ -55,7 +55,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 		}
 		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err, "should be forbidden")
-		gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error")
+		framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error")
 	})
 
 	ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ", func() {
@@ -145,7 +145,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
 		podDeleted := (err != nil && errors.IsNotFound(err)) ||
 			(err == nil && preemptedPod.DeletionTimestamp != nil)
-		gomega.Expect(podDeleted).To(gomega.BeTrue())
+		framework.ExpectEqual(podDeleted, true)
 		// Other pods (mid priority ones) should be present.
 		for i := 1; i < len(pods); i++ {
 			livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
@@ -214,7 +214,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
 		podDeleted := (err != nil && errors.IsNotFound(err)) ||
 			(err == nil && preemptedPod.DeletionTimestamp != nil)
-		gomega.Expect(podDeleted).To(gomega.BeTrue())
+		framework.ExpectEqual(podDeleted, true)
 		// Other pods (mid priority ones) should be present.
 		for i := 1; i < len(pods); i++ {
 			livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
@@ -21,9 +21,8 @@ import (
 	"strconv"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -168,10 +167,10 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
 		framework.ExpectNoError(err)
 
 		pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
-		gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found")
+		framework.ExpectEqual(ok, true, "PV has no LabelZone to be found")
 		pvZones.Insert(pvZone)
 	}
-	gomega.Expect(pvZones.Equal(expectedZones)).To(gomega.BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
+	framework.ExpectEqual(pvZones.Equal(expectedZones), true, fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
 }
 
 type staticPVTestConfig struct {
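One caveat the mechanical rewrite carries through (visible in the scheduler-preemption and affinity hunks above): when the checked value is a composed condition, ExpectEqual(cond, true) fails no more informatively than BeTrue() did, since both operands are bare bools. Where the inputs matter, the explain varargs can carry them. A sketch, not part of this commit:

    package sketch

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // assertDeleted is hypothetical; it shows explain args preserving the
    // inputs that the bare bool comparison discards.
    func assertDeleted(podDeleted bool, name string, getErr error, ts *metav1.Time) {
    	framework.ExpectEqual(podDeleted, true,
    		"pod %q: get error=%v, deletionTimestamp=%v", name, getErr, ts)
    }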