Merge pull request #106486 from Ahmed-Aghadi/codeEnhanceNode

test/e2e/node + test/e2e/scheduling: improve checks
Kubernetes Prow Robot 2022-02-28 11:17:46 -08:00 committed by GitHub
commit 2c91952fcf
5 changed files with 59 additions and 28 deletions
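The change applied across the hunks below is a single pattern: bare boolean assertions such as framework.ExpectEqual(found, true) or framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) are replaced by an explicit condition followed by framework.Failf, so a failing test reports what was actually observed instead of a generic "false != true". A minimal sketch of the before/after shape, reusing the framework helpers visible in the diff; the surrounding allocatable lookup is illustrative rather than copied from any one hunk:

    allocatable, found := node.Status.Allocatable[v1.ResourceCPU]

    // Before: the assertion only says the boolean was false.
    // framework.ExpectEqual(found, true)

    // After: the failure message carries the map that was missing the entry.
    if !found {
        framework.Failf("node.Status.Allocatable %v does not contain entry %v",
            node.Status.Allocatable, v1.ResourceCPU)
    }

The error checks follow the same shape: instead of asserting err == nil || apierrors.IsAlreadyExists(err), the new code fails only when err is non-nil and not an AlreadyExists error, and includes err in the message.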


@@ -58,8 +58,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
labelFooName: "bar",
}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectError(err, "should be forbidden")
framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
if !apierrors.IsForbidden(err) {
framework.Failf("expected 'forbidden' as error, got instead: %v", err)
}
})
ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] ", func() {


@@ -23,7 +23,7 @@ import (
"sync"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -81,8 +81,10 @@ func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate fu
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e, ok := obj.(*v1.Event)
if !ok {
framework.Failf("Expected *v1.Event, got %T %v", obj, obj)
}
ginkgo.By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message))
framework.ExpectEqual(ok, true)
if eventPredicate(e) {
observedMatchingEvent = true
}


@@ -126,7 +126,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
nodeToAllocatableMap := make(map[string]int64)
for _, node := range nodeList.Items {
allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("node.Status.Allocatable %v does not contain entry %v", node.Status.Allocatable, v1.ResourceEphemeralStorage)
}
nodeToAllocatableMap[node.Name] = allocatable.Value()
if nodeMaxAllocatable < allocatable.Value() {
nodeMaxAllocatable = allocatable.Value()
@@ -145,9 +147,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
var podsNeededForSaturation int
var ephemeralStoragePerPod int64
ephemeralStoragePerPod = nodeMaxAllocatable / maxNumberOfPods
ephemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods
framework.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
for name, leftAllocatable := range nodeToAllocatableMap {
@@ -267,7 +267,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
})
ginkgo.It("verify pod overhead is accounted for", func() {
framework.ExpectEqual(testNodeName != "", true)
if testNodeName == "" {
framework.Fail("unable to find a node which can run a pod")
}
ginkgo.By("Starting Pod to consume most of the node's resource.")
@@ -339,7 +341,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
// Find allocatable amount of CPU.
allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("node.Status.Allocatable %v does not contain entry %v", node.Status.Allocatable, v1.ResourceCPU)
}
nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
if nodeMaxAllocatable < allocatable.MilliValue() {
nodeMaxAllocatable = allocatable.MilliValue()
@@ -934,9 +938,14 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN
if expectSuccess {
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := observeEventAfterAction(f.ClientSet, f.Namespace.Name, predicate, action)
observed, err := observeEventAfterAction(f.ClientSet, f.Namespace.Name, predicate, action)
framework.ExpectNoError(err)
framework.ExpectEqual(success, true)
if expectSuccess && !observed {
framework.Failf("Did not observe success event after performing the supplied action for pod %v", podName)
}
if !expectSuccess && !observed {
framework.Failf("Did not observe failed event after performing the supplied action for pod %v", podName)
}
}
// TODO: upgrade calls in PodAffinity tests when we're able to run them
@@ -986,7 +995,7 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
// CreateHostPortPods creates RC with host port 4321
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
ginkgo.By(fmt.Sprintf("Running RC which reserves host port"))
ginkgo.By("Running RC which reserves host port")
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
@@ -1004,7 +1013,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, expectR
// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
ginkgo.By("Running RC which reserves host port and defines node selector")
config := &testutils.RCConfig{
Client: f.ClientSet,
@@ -1074,11 +1083,13 @@ func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods,
for _, pod := range pods.Items {
if pod.Spec.NodeName != "" && workerNodes.Has(pod.Spec.NodeName) {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
framework.ExpectEqual(scheduledCondition != nil, true)
if scheduledCondition != nil {
framework.ExpectEqual(scheduledCondition.Status, v1.ConditionTrue)
scheduledPods = append(scheduledPods, pod)
if scheduledCondition == nil {
framework.Failf("Did not find 'scheduled' condition for pod %+v", podName)
}
if scheduledCondition.Status != v1.ConditionTrue {
framework.Failf("PodStatus isn't 'true' for pod %+v", podName)
}
scheduledPods = append(scheduledPods, pod)
} else if pod.Spec.NodeName == "" {
notScheduledPods = append(notScheduledPods, pod)
}


@@ -94,7 +94,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var err error
for _, pair := range priorityPairs {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 'alreadyExists' as error, got instead: %v", err)
}
}
e2enode.WaitForTotalHealthy(cs, time.Minute)
@@ -194,13 +196,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{})
podPreempted := (err != nil && apierrors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
if !podPreempted {
framework.Failf("expected pod to be preempted, instead got pod %+v and error %v", preemptedPod, err)
}
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
framework.ExpectEqual(podPreempted, true)
})
/*
@@ -303,7 +306,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
framework.ExpectEqual(podPreempted, true)
if !podPreempted {
framework.Failf("expected pod to be preempted, instead got pod %+v and error %v", preemptedPod, err)
}
})
ginkgo.Context("PodTopologySpread Preemption", func() {
@@ -521,7 +526,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
if err != nil {
framework.Logf("Failed to create priority '%v/%v'. Reason: %v. Msg: %v", priorityName, priorityVal, apierrors.ReasonForError(err), err)
}
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 'alreadyExists' as error, got instead: %v", err)
}
}
})
@@ -685,7 +692,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
if err != nil {
framework.Logf("Failed to create priority '%v/%v'. Reason: %v. Msg: %v", name, val, apierrors.ReasonForError(err), err)
}
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 'alreadyExists' as error, got instead: %v", err)
}
pcs = append(pcs, pc)
}
})


@@ -413,11 +413,15 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
ratio = math.Max(maxCPUFraction, maxMemFraction)
for _, node := range nodes {
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("Node %v: node.Status.Allocatable %v does not contain entry %v", node.Name, node.Status.Allocatable, v1.ResourceMemory)
}
memAllocatableVal := memAllocatable.Value()
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("Node %v: node.Status.Allocatable %v does not contain entry %v", node.Name, node.Status.Allocatable, v1.ResourceCPU)
}
cpuAllocatableMil := cpuAllocatable.MilliValue()
needCreateResource := v1.ResourceList{}
@@ -508,7 +512,9 @@ func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods
}
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("Node %v: node.Status.Allocatable %v does not contain entry %v", node.Name, node.Status.Allocatable, v1.ResourceCPU)
}
cpuAllocatableMil := cpuAllocatable.MilliValue()
floatOne := float64(1)
@@ -517,7 +523,9 @@ func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods
cpuFraction = floatOne
}
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
framework.ExpectEqual(found, true)
if !found {
framework.Failf("Node %v: node.Status.Allocatable %v does not contain entry %v", node.Name, node.Status.Allocatable, v1.ResourceMemory)
}
memAllocatableVal := memAllocatable.Value()
memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
if memFraction > floatOne {