From c1de3d6e5b48b594370545b1f4eadbde3333f708 Mon Sep 17 00:00:00 2001
From: zouyee
Date: Tue, 14 Jan 2020 09:43:04 +0800
Subject: [PATCH] fix ci-kubernetes-node-kubelet-serial

Non-system critical priority classes are not allowed to have a value
larger than HighestUserDefinablePriority

Signed-off-by: Zou Nengren
---
 test/e2e_node/critical_pod_test.go | 46 ++++++++++++++----------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index 218504264ca..cef85e10baa 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -20,8 +20,6 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
-	schedulingv1 "k8s.io/api/scheduling/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
@@ -35,11 +33,10 @@ import (
 )
 
 const (
-	criticalPodName            = "critical-pod"
-	guaranteedPodName          = "guaranteed"
-	burstablePodName           = "burstable"
-	bestEffortPodName          = "best-effort"
-	systemCriticalPriorityName = "critical-pod-test-high-priority"
+	criticalPodName   = "static-critical-pod"
+	guaranteedPodName = "guaranteed"
+	burstablePodName  = "burstable"
+	bestEffortPodName = "best-effort"
 )
 
 var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
@@ -58,8 +55,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 			framework.Skipf("unable to run test without dynamic kubelet config enabled.")
 		}
 		// because adminssion Priority enable, If the priority class is not found, the Pod is rejected.
-		systemCriticalPriority := &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: systemCriticalPriorityName}, Value: scheduling.SystemCriticalPriority}
-
+		node := getNodeName(f)
 		// Define test pods
 		nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
@@ -70,22 +66,19 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 				v1.ResourceCPU:    resource.MustParse("100m"),
 				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
-		})
+		}, node)
 		nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				v1.ResourceCPU:    resource.MustParse("100m"),
 				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
-		})
+		}, node)
-		nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{})
+		nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{}, node)
 		criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
 			// request the entire resource capacity of the node, so that
 			// admitting this pod requires the other pod to be preempted
 			Requests: getNodeCPUAndMemoryCapacity(f),
-		})
-
-		_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
+		}, node)
 
 		// Create pods, starting with non-critical so that the critical preempts the other pods.
 		f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -96,9 +89,9 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 		framework.ExpectNoError(err)
 		for _, p := range updatedPodList.Items {
 			if p.Name == nonCriticalBestEffort.Name {
-				framework.ExpectNotEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should be preempted", p.Name))
+				framework.ExpectEqual(p.Status.Phase, v1.PodRunning, fmt.Sprintf("pod: %v should not be preempted with status: %#v", p.Name, p.Status))
 			} else {
-				framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should not be preempted", p.Name))
+				framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
 			}
 		}
 	})
@@ -108,8 +101,6 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(systemCriticalPriorityName, &metav1.DeleteOptions{})
-		framework.ExpectNoError(err)
 		// Log Events
 		logPodEvents(f)
 		logNodeEvents(f)
@@ -130,8 +121,15 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
 	}
 }
 
-func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *v1.Pod {
-	value := scheduling.SystemCriticalPriority
+func getNodeName(f *framework.Framework) string {
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+	framework.ExpectNoError(err)
+	// Assuming that there is only one node, because this is a node e2e test.
+	framework.ExpectEqual(len(nodeList.Items), 1)
+	return nodeList.Items[0].GetName()
+}
+
+func getTestPod(critical bool, name string, resources v1.ResourceRequirements, node string) *v1.Pod {
 	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "Pod",
@@ -146,6 +144,7 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 					Resources: resources,
 				},
 			},
+			NodeName: node,
 		},
 	}
 	if critical {
@@ -153,8 +152,7 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 		pod.ObjectMeta.Annotations = map[string]string{
 			kubelettypes.ConfigSourceAnnotationKey: kubelettypes.FileSource,
 		}
-		pod.Spec.PriorityClassName = systemCriticalPriorityName
-		pod.Spec.Priority = &value
+		pod.Spec.PriorityClassName = scheduling.SystemNodeCritical
 		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
 	} else {
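
For readers who want the failure mode behind the subject line spelled out, here is a minimal Go sketch of the idea, not code taken from the patch: the old test created its own PriorityClass carrying the system-critical value, which apiserver validation rejects for non-system classes, while the patched test marks the pod critical by naming the built-in system-node-critical class and pins it to the node under test. The package name, the clientset parameter, and the helper names (createTestPriorityClass, markNodeCritical) are illustrative assumptions; the numeric values quoted in the comments are the constants from k8s.io/kubernetes/pkg/apis/scheduling, and the single-argument Create call mirrors the client-go signature used elsewhere in this patch.

// Sketch only (not part of the patch): why cloning the system priority value
// into a user-defined PriorityClass fails, and what the test relies on instead.
package example

import (
	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Before the patch, the test created a PriorityClass carrying
// SystemCriticalPriority (2000000000). Validation of non-system classes
// rejects any value above HighestUserDefinablePriority (1000000000),
// so this create call fails on clusters that enforce it.
func createTestPriorityClass(clientset kubernetes.Interface) error {
	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "critical-pod-test-high-priority"},
		Value:      2000000000, // > HighestUserDefinablePriority -> rejected
	}
	_, err := clientset.SchedulingV1().PriorityClasses().Create(pc)
	return err
}

// After the patch, the critical pod simply references the built-in
// "system-node-critical" class (scheduling.SystemNodeCritical), which
// already exists and is permitted to carry a system-level priority.
func markNodeCritical(pod *v1.Pod) {
	pod.Spec.PriorityClassName = "system-node-critical"
}

Setting Spec.NodeName on every test pod, as the new getTestPod signature does, keeps the whole preemption scenario on the single node that the node e2e suite runs against, so no extra PriorityClass setup or cleanup is needed.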