mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)
Use framework.ExpectEqual() under e2e/scheduling
This commit is contained in:
parent f978c4cab5
commit e7752f72e4
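The change is mechanical: direct gomega assertions of the form gomega.Expect(x).To(gomega.Equal(y)) become framework.ExpectEqual(x, y). A minimal sketch of what such a helper plausibly looks like, assuming it is a thin wrapper over gomega.ExpectWithOffset (the real definition lives in the e2e framework package and may differ in detail):

// Illustrative sketch only, not a verbatim copy of the framework source.
package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the running test when actual != extra. The offset of 1
// makes gomega attribute the failure to the caller rather than to this helper.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

With such a helper, an assertion like gomega.Expect(len(podList.Items)).To(gomega.Equal(2)) in the hunks below collapses to framework.ExpectEqual(len(podList.Items), 2).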
@@ -181,7 +181,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
 podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(len(podList.Items)).To(gomega.Equal(2))
+framework.ExpectEqual(len(podList.Items), 2)
 nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
 gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))
@@ -31,7 +31,6 @@ import (
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 
 const (
@@ -59,7 +58,7 @@ var _ = SIGDescribe("LimitRange", func() {
 options := metav1.ListOptions{LabelSelector: selector.String()}
 limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
 framework.ExpectNoError(err, "failed to query for limitRanges")
-gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
+framework.ExpectEqual(len(limitRanges.Items), 0)
 options = metav1.ListOptions{
 LabelSelector: selector.String(),
 ResourceVersion: limitRanges.ListMeta.ResourceVersion,
@@ -36,7 +36,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 // ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )
@@ -108,7 +107,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 for _, node := range nodeList.Items {
 e2elog.Logf("Node: %v", node)
 podCapacity, found := node.Status.Capacity[v1.ResourcePods]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 totalPodCapacity += podCapacity.Value()
 }
@@ -148,7 +147,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 nodeToAllocatableMap := make(map[string]int64)
 for _, node := range nodeList.Items {
 allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 nodeToAllocatableMap[node.Name] = allocatable.Value()
 if nodeMaxAllocatable < allocatable.Value() {
 nodeMaxAllocatable = allocatable.Value()
@@ -248,7 +247,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
 // Find allocatable amount of CPU.
 allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
 if nodeMaxAllocatable < allocatable.MilliValue() {
 nodeMaxAllocatable = allocatable.MilliValue()
@@ -384,7 +383,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
 labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
+framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
 })
 
 // Test Nodes does not have any label, hence it should be impossible to schedule Pod with
@@ -471,7 +470,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
 labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
+framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
 })
 
 // 1. Run a pod to get an available node, then delete the pod
@@ -514,7 +513,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName))
 deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
+framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName)
 })
 
 // 1. Run a pod to get an available node, then delete the pod
@@ -716,7 +715,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
 }
 success, err := common.ObserveEventAfterAction(f, predicate, action)
 framework.ExpectNoError(err)
-gomega.Expect(success).To(gomega.Equal(true))
+framework.ExpectEqual(success, true)
 }
 
 // TODO: upgrade calls in PodAffinity tests when we're able to run them
@@ -734,8 +733,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 return ""
 }
 
-gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
-gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
+framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
+framework.ExpectEqual(len(scheduledPods), expectedScheduled, printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
 }
 
 // verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
@@ -752,8 +751,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected
 return ""
 }
 
-gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
-gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
+framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
+framework.ExpectEqual(len(scheduledPods), expectedScheduled, printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
 }
 
 func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
@@ -78,7 +78,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 nodeList = &v1.NodeList{}
 for _, pair := range priorityPairs {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 }
 
 e2enode.WaitForTotalHealthy(cs, time.Minute)
@@ -98,10 +98,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 pods := make([]*v1.Pod, len(nodeList.Items))
 for i, node := range nodeList.Items {
 cpuAllocatable, found := node.Status.Allocatable["cpu"]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 milliCPU := cpuAllocatable.MilliValue() * 40 / 100
 memAllocatable, found := node.Status.Allocatable["memory"]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 memory := memAllocatable.Value() * 60 / 100
 podRes = v1.ResourceList{}
 podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
@@ -158,10 +158,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 pods := make([]*v1.Pod, len(nodeList.Items))
 for i, node := range nodeList.Items {
 cpuAllocatable, found := node.Status.Allocatable["cpu"]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 milliCPU := cpuAllocatable.MilliValue() * 40 / 100
 memAllocatable, found := node.Status.Allocatable["memory"]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 memory := memAllocatable.Value() * 60 / 100
 podRes = v1.ResourceList{}
 podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
@@ -456,7 +456,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
 e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
 }
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 }
 })
@@ -195,7 +195,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 }
 success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
 framework.ExpectNoError(err)
-gomega.Expect(success).To(gomega.Equal(true))
+framework.ExpectEqual(success, true)
 
 defer framework.RemoveAvoidPodsOffNode(cs, nodeName)
@@ -239,7 +239,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 ginkgo.By("Pod should prefer scheduled to the node don't have the taint.")
 tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
+framework.ExpectEqual(tolePod.Spec.NodeName, nodeName)
 
 ginkgo.By("Trying to apply 10 taint on the first node.")
 var tolerations []v1.Toleration
@@ -259,7 +259,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
 tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
+framework.ExpectEqual(tolePod.Spec.NodeName, nodeName)
 })
 })
@@ -284,11 +284,11 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 ratio = math.Max(maxCPUFraction, maxMemFraction)
 for _, node := range nodes {
 memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 memAllocatableVal := memAllocatable.Value()
 
 cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 needCreateResource := v1.ResourceList{}
@@ -342,7 +342,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 }
 }
 cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 floatOne := float64(1)
@@ -351,7 +351,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 cpuFraction = floatOne
 }
 memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
-gomega.Expect(found).To(gomega.Equal(true))
+framework.ExpectEqual(found, true)
 memAllocatableVal := memAllocatable.Value()
 memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
 if memFraction > floatOne {
@@ -113,7 +113,8 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 // Now make sure they're spread across zones
 zoneNames, err := framework.GetClusterZones(f.ClientSet)
 framework.ExpectNoError(err)
-gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
+ret, _ := checkZoneSpreading(f.ClientSet, pods, zoneNames.List())
+framework.ExpectEqual(ret, true)
 }
 
 // Find the name of the zone in which a Node is running
@@ -228,5 +229,6 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 // Now make sure they're spread across zones
 zoneNames, err := framework.GetClusterZones(f.ClientSet)
 framework.ExpectNoError(err)
-gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
+ret, _ := checkZoneSpreading(f.ClientSet, pods, zoneNames.List())
+framework.ExpectEqual(ret, true)
 }