Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Merge pull request #103720 from alculquicondor/rm-e2e-preferavoidpods
Remove E2E test for NodePreferAvoidPods scheduling Score
Commit 4de033c2c0
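For context: the deleted test exercised the alpha node annotation scheduler.alpha.kubernetes.io/preferAvoidPods (exposed in Go as v1.PreferAvoidPodsAnnotationKey), which the NodePreferAvoidPods score reads to down-rank a node for pods owned by a matching controller. Below is a minimal, hypothetical sketch of how that annotation value is built, mirroring the helpers removed in this diff; the package main wrapper, controller name, and UID are illustrative and not part of the change.

// Sketch only: build the preferAvoidPods annotation value that the removed
// test helpers wrote onto a Node. Name and UID below are made up for illustration.
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	isController := true
	avoid := v1.AvoidPods{
		PreferAvoidPods: []v1.PreferAvoidPodsEntry{{
			PodSignature: v1.PodSignature{
				PodController: &metav1.OwnerReference{
					APIVersion: "v1",
					Kind:       "ReplicationController",
					Name:       "scheduler-priority-avoid-pod",          // example name
					UID:        "11111111-2222-3333-4444-555555555555", // example UID
					Controller: &isController,
				},
			},
			Reason:  "some reason",
			Message: "some message",
		}},
	}
	val, err := json.Marshal(avoid)
	if err != nil {
		panic(err)
	}
	// The JSON value is stored on the Node under the well-known annotation key.
	fmt.Printf("%s: %s\n", v1.PreferAvoidPodsAnnotationKey, val)
}

In the removed test below, addOrUpdateAvoidPodOnNode writes this key/value pair onto the chosen node, observeNodeUpdateAfterAction waits for the update to be observed, and the test then asserts that pods of the scaled ReplicationController land on other nodes.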
@@ -25,7 +25,6 @@ import (

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
@@ -56,64 +55,6 @@ func scheduleFailureEvent(podName string) func(*v1.Event) bool {
// Action is a function to be performed by the system.
type Action func() error

// observeNodeUpdateAfterAction returns true if a node update matching the predicate was emitted
// from the system after performing the supplied action.
func observeNodeUpdateAfterAction(c clientset.Interface, nodeName string, nodePredicate func(*v1.Node) bool, action Action) (bool, error) {
	observedMatchingNode := false
	nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
	informerStartedChan := make(chan struct{})
	var informerStartedGuard sync.Once

	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.FieldSelector = nodeSelector.String()
				ls, err := c.CoreV1().Nodes().List(context.TODO(), options)
				return ls, err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				// Signal parent goroutine that watching has begun.
				defer informerStartedGuard.Do(func() { close(informerStartedChan) })
				options.FieldSelector = nodeSelector.String()
				w, err := c.CoreV1().Nodes().Watch(context.TODO(), options)
				return w, err
			},
		},
		&v1.Node{},
		0,
		cache.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				n, ok := newObj.(*v1.Node)
				framework.ExpectEqual(ok, true)
				if nodePredicate(n) {
					observedMatchingNode = true
				}
			},
		},
	)

	// Start the informer and block this goroutine waiting for the started signal.
	informerStopChan := make(chan struct{})
	defer func() { close(informerStopChan) }()
	go controller.Run(informerStopChan)
	<-informerStartedChan

	// Invoke the action function.
	err := action()
	if err != nil {
		return false, err
	}

	// Poll whether the informer has found a matching node update, with a timeout.
	// Wait up to 2 minutes, polling every second.
	timeout := 2 * time.Minute
	interval := 1 * time.Second
	err = wait.Poll(interval, timeout, func() (bool, error) {
		return observedMatchingNode, nil
	})
	return err == nil, err
}

// observeEventAfterAction returns true if an event matching the predicate was emitted
// from the system after performing the supplied action.
func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate func(*v1.Event) bool, action Action) (bool, error) {
@@ -18,7 +18,6 @@ package scheduling

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"sync"
@@ -30,7 +29,6 @@ import (
	_ "github.com/stretchr/testify/assert"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
@@ -43,10 +41,8 @@ import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// Resource is a collection of compute resources.
@@ -73,61 +69,6 @@ var podRequestedResource = &v1.ResourceRequirements{
	},
}

// addOrUpdateAvoidPodOnNode adds the avoidPods annotation to the node, overriding any existing value.
func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
	err := wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		taintsData, err := json.Marshal(avoidPods)
		framework.ExpectNoError(err)

		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData)
		_, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
		if err != nil {
			if !apierrors.IsConflict(err) {
				framework.ExpectNoError(err)
			} else {
				framework.Logf("Conflict when trying to add/update avoidPods %v to %v with error %v", avoidPods, nodeName, err)
				return false, nil
			}
		}
		return true, nil
	})
	framework.ExpectNoError(err)
}

// removeAvoidPodsOffNode removes AvoidPods annotations from the node. It does not fail if no such annotation exists.
func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) {
	err := wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if node.Annotations == nil {
			return true, nil
		}
		delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
		_, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
		if err != nil {
			if !apierrors.IsConflict(err) {
				framework.ExpectNoError(err)
			} else {
framework.Logf("Conflict when trying to remove avoidPods to %v", nodeName)
|
||||
				return false, nil
			}
		}
		return true, nil
	})
	framework.ExpectNoError(err)
}

// nodesAreTooUtilized ensures that each node can support 2*crioMinMemLimit
// We check for double because it needs to support at least the cri-o minimum
// plus whatever delta between node usages (which could be up to or at least crioMinMemLimit)
@@ -262,70 +203,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
		framework.ExpectNotEqual(labelPod.Spec.NodeName, nodeName)
	})

	ginkgo.It("Pod should avoid nodes that have avoidPod annotation", func() {
		nodeName := nodeList.Items[0].Name
		// make the nodes have balanced cpu,mem usage
		cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
		defer cleanUp()
		framework.ExpectNoError(err)
		ginkgo.By("Create a RC, with 0 replicas")
		rc := createRC(ns, "scheduler-priority-avoid-pod", int32(0), map[string]string{"name": "scheduler-priority-avoid-pod"}, f, podRequestedResource)
		// Cleanup the replication controller when we are done.
		defer func() {
			// Resize the replication controller to zero to get rid of pods.
			if err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
				framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
			}
		}()

		ginkgo.By("Trying to apply avoidPod annotations on the first node.")
		avoidPod := v1.AvoidPods{
			PreferAvoidPods: []v1.PreferAvoidPodsEntry{
				{
					PodSignature: v1.PodSignature{
						PodController: &metav1.OwnerReference{
							APIVersion: "v1",
							Kind:       "ReplicationController",
							Name:       rc.Name,
							UID:        rc.UID,
							Controller: func() *bool { b := true; return &b }(),
						},
					},
					Reason:  "some reason",
					Message: "some message",
				},
			},
		}
		action := func() error {
			addOrUpdateAvoidPodOnNode(cs, nodeName, avoidPod)
			return nil
		}
		predicate := func(node *v1.Node) bool {
			val, err := json.Marshal(avoidPod)
			if err != nil {
				return false
			}
			return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val)
		}
		success, err := observeNodeUpdateAfterAction(f.ClientSet, nodeName, predicate, action)
		framework.ExpectNoError(err)
		framework.ExpectEqual(success, true)

		defer removeAvoidPodsOffNode(cs, nodeName)

ginkgo.By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
|
||||
|
||||
e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
|
||||
testPods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: "name=scheduler-priority-avoid-pod",
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
ginkgo.By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
|
||||
		for _, pod := range testPods.Items {
			framework.ExpectNotEqual(pod.Spec.NodeName, nodeName)
		}
	})

	ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
		// make the nodes have balanced cpu,mem usage ratio
		cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
@@ -664,38 +541,6 @@ func getNonZeroRequests(pod *v1.Pod) Resource {
	return result
}

func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string, f *framework.Framework, resource *v1.ResourceRequirements) *v1.ReplicationController {
	rc := &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicationController",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: rsName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicas,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: rcPodLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:      rsName,
							Image:     imageutils.GetPauseImageName(),
							Resources: *resource,
						},
					},
				},
			},
		},
	}
	rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rc, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	return rc
}

func getRandomTaint() v1.Taint {
	return v1.Taint{
		Key: fmt.Sprintf("kubernetes.io/e2e-scheduling-priorities-%s", string(uuid.NewUUID()[:23])),