Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
Merge pull request #113615 from kerthcet/feat/add-benchmark-tests

Add nodeInclusionPolicy benchmark tests to scheduler_perf

Commit: 73f6b96f0a
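For context: the nodeInclusionPolicy being benchmarked consists of the NodeAffinityPolicy and NodeTaintsPolicy fields on v1.TopologySpreadConstraint, gated by the NodeInclusionPolicyInPodTopologySpread feature gate. A minimal sketch of a constraint that honors both, built from the k8s.io/api/core/v1 types (illustrative only, not part of this commit):

// Sketch: a topology spread constraint that honors node affinity and node
// taints when the scheduler computes spreading skew. With Honor, nodes that
// fail the pod's node affinity or carry untolerated taints are excluded
// from the spread domains.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	honor := v1.NodeInclusionPolicyHonor
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:            1,
		TopologyKey:        "kubernetes.io/hostname",
		WhenUnsatisfiable:  v1.DoNotSchedule,
		NodeAffinityPolicy: &honor, // exclude nodes failing the pod's nodeAffinity/nodeSelector
		NodeTaintsPolicy:   &honor, // exclude nodes whose taints the pod does not tolerate
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"foo": "bar"},
		},
	}
	fmt.Printf("%+v\n", constraint)
}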
@@ -99,7 +99,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error {
         }
     }

-    nodes, err := GetReadySchedulableNodes(p.client)
+    nodes, err := waitListAllNodes(p.client)
     if err != nil {
         klog.Fatalf("Error listing nodes: %v", err)
     }
@@ -119,7 +119,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error {
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
     // TODO(#93794): make CleanupNodes only clean up the nodes created by this
     // IntegrationTestNodePreparer to make this more intuitive.
-    nodes, err := GetReadySchedulableNodes(p.client)
+    nodes, err := waitListAllNodes(p.client)
     if err != nil {
         klog.Fatalf("Error listing nodes: %v", err)
     }
@@ -21,16 +21,13 @@ package framework
 import (
     "context"
     "fmt"
-    "strings"
     "testing"
     "time"

     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
-    v1helper "k8s.io/component-helpers/scheduling/corev1"
     "k8s.io/klog/v2"
     nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
 )
@@ -62,42 +59,12 @@ func DeleteNamespaceOrDie(c clientset.Interface, ns *v1.Namespace, t testing.TB)
     }
 }

-// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
-// 1) Needs to be schedulable.
-// 2) Needs to be ready.
-// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
-// If there are no nodes that are both ready and schedulable, this will return an error.
-func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
-    nodes, err = checkWaitListSchedulableNodes(c)
-    if err != nil {
-        return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
-    }
-    Filter(nodes, func(node v1.Node) bool {
-        return IsNodeSchedulable(&node) && isNodeUntainted(&node)
-    })
-    if len(nodes.Items) == 0 {
-        return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
-    }
-    return nodes, nil
-}
-
-// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries.
-func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
-    nodes, err := waitListSchedulableNodes(c)
-    if err != nil {
-        return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for integration test cluster", err)
-    }
-    return nodes, nil
-}
-
-// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
+// waitListAllNodes is a wrapper around listing nodes supporting retries.
+func waitListAllNodes(c clientset.Interface) (*v1.NodeList, error) {
     var nodes *v1.NodeList
     var err error
     if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
-        nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
-            "spec.unschedulable": "false",
-        }.AsSelector().String()})
+        nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
         if err != nil {
             return false, err
         }
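The refactor above folds three schedulability-filtering helpers into one plain node listing with retries. A toy demonstration of the wait.PollImmediate primitive it relies on, with illustrative interval and timeout values (the real helper uses the framework's poll and singleCallTimeout constants):

// Toy demo of wait.PollImmediate: the condition runs once immediately, then
// on every interval tick until it returns true, returns an error, or the
// timeout elapses. Returning (false, err) aborts the poll right away, which
// is why waitListAllNodes propagates List errors that way.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // succeed on the third attempt
	})
	fmt.Println(attempts, err) // 3 <nil>
}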
@@ -221,52 +188,3 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
     }
     return false
 }
-
-// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
-// TODO: need to discuss whether to return bool and error type
-func isNodeUntainted(node *v1.Node) bool {
-    nonblockingTaints := ""
-    fakePod := &v1.Pod{
-        TypeMeta: metav1.TypeMeta{
-            Kind:       "Pod",
-            APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "fake-not-scheduled",
-            Namespace: "fake-not-scheduled",
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:  "fake-not-scheduled",
-                    Image: "fake-not-scheduled",
-                },
-            },
-        },
-    }
-
-    // Simple lookup for nonblocking taints based on comma-delimited list.
-    nonblockingTaintsMap := map[string]struct{}{}
-    for _, t := range strings.Split(nonblockingTaints, ",") {
-        if strings.TrimSpace(t) != "" {
-            nonblockingTaintsMap[strings.TrimSpace(t)] = struct{}{}
-        }
-    }
-
-    n := node
-    if len(nonblockingTaintsMap) > 0 {
-        nodeCopy := node.DeepCopy()
-        nodeCopy.Spec.Taints = []v1.Taint{}
-        for _, v := range node.Spec.Taints {
-            if _, isNonblockingTaint := nonblockingTaintsMap[v.Key]; !isNonblockingTaint {
-                nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
-            }
-        }
-        n = nodeCopy
-    }
-
-    _, untolerated := v1helper.FindMatchingUntoleratedTaint(n.Spec.Taints, fakePod.Spec.Tolerations, func(t *v1.Taint) bool {
-        return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
-    })
-    return !untolerated
-}
test/integration/scheduler_perf/config/node-with-taint.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Node
+metadata:
+  generateName: taint-node-
+spec:
+  taints:
+  - effect: NoSchedule
+    key: foo
+status:
+  capacity:
+    pods: "110"
+    cpu: "4"
+    memory: 32Gi
+  conditions:
+  - status: "True"
+    type: Ready
+  phase: Running
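This NoSchedule taint is the point of the template: the measured pods carry no matching toleration, so with nodeTaintsPolicy: Honor these nodes should be excluded from the spread domains. Purely as a hypothetical illustration (not part of the commit), a pod that did want to schedule onto such a node would need a toleration like:

// Hypothetical toleration matching the foo:NoSchedule taint from the node
// template above; the benchmark pods deliberately omit it.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	tol := v1.Toleration{
		Key:      "foo",                    // taint key from node-with-taint.yaml
		Operator: v1.TolerationOpExists,    // match any value of the key
		Effect:   v1.TaintEffectNoSchedule, // tolerate the NoSchedule effect
	}
	fmt.Printf("%+v\n", tol)
}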
@@ -630,3 +630,31 @@
       initNamespaces: 100
       measurePods: 1000

+- name: SchedulingWithNodeInclusionPolicy
+  featureGates:
+    NodeInclusionPolicyInPodTopologySpread: true
+  defaultPodTemplatePath: config/pod-with-node-inclusion-policy.yaml
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $normalNodes
+    uniqueNodeLabelStrategy:
+      labelKey: kubernetes.io/hostname
+  - opcode: createNodes
+    nodeTemplatePath: config/node-with-taint.yaml
+    countParam: $taintNodes
+    uniqueNodeLabelStrategy:
+      labelKey: kubernetes.io/hostname
+  - opcode: createPods
+    countParam: $measurePods
+    collectMetrics: true
+  workloads:
+  - name: 500Nodes
+    params:
+      taintNodes: 100
+      normalNodes: 400
+      measurePods: 400
+  - name: 5000Nodes
+    params:
+      taintNodes: 1000
+      normalNodes: 4000
+      measurePods: 4000
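Each $-prefixed countParam above is bound per workload: 500Nodes spreads 400 measured pods across 400 normal plus 100 tainted nodes, and 5000Nodes scales all three counts by ten. Assuming the harness's usual BenchmarkPerfScheduling entry point, the new case would be selected with something like go test ./test/integration/scheduler_perf -bench BenchmarkPerfScheduling/SchedulingWithNodeInclusionPolicy/500Nodes; the exact invocation is an assumption here, not part of the commit.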
test/integration/scheduler_perf/config/pod-with-node-inclusion-policy.yaml (new file, 28 lines; path per defaultPodTemplatePath above)
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: spreading-pod-with-node-inclusion-policy-
+  labels:
+    foo: bar
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.8
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 100m
+        memory: 500Mi
+      requests:
+        cpu: 100m
+        memory: 500Mi
+  topologySpreadConstraints:
+  - maxSkew: 1
+    topologyKey: kubernetes.io/hostname
+    whenUnsatisfiable: DoNotSchedule
+    nodeAffinityPolicy: Honor
+    nodeTaintsPolicy: Honor
+    labelSelector:
+      matchLabels:
+        foo: bar
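Both policies are pinned to Honor deliberately: per KEP-3094, nodeAffinityPolicy already defaults to Honor while nodeTaintsPolicy defaults to Ignore, so setting them explicitly is what steers the benchmark onto the taint-filtering path that the tainted nodes above are meant to exercise.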
@@ -988,7 +988,7 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, podInformer coreinfo
         if len(scheduled) >= wantCount {
             return true, nil
         }
-        klog.Infof("%s: namespace %s: got %d pods, want %d", name, namespace, len(scheduled), wantCount)
+        klog.Infof("%s: namespace %s, pods: want %d, got %d", name, namespace, wantCount, len(scheduled))
         return false, nil
     })
 }
@@ -65,7 +65,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
         if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
-        return false, fmt.Errorf("Failed to create object with non-retriable error: %v ", err)
+        return false, fmt.Errorf("failed to create object with non-retriable error: %v ", err)
     }
     return RetryWithExponentialBackOff(createFunc)
 }
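The only change here lowercases the error string, following the Go convention that error strings are not capitalized. For reference, a minimal sketch of the retry shape that createFunc feeds into RetryWithExponentialBackOff, using k8s.io/apimachinery's wait.ExponentialBackoff with illustrative (assumed) backoff values rather than the helper's real ones:

// Minimal sketch of driving a (done, err) ConditionFunc with exponential
// backoff, the same shape createFunc has above. Backoff values are
// illustrative assumptions.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial delay
		Factor:   2.0,                    // double the delay after each failure
		Steps:    5,                      // give up after five attempts
	}
	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // succeed on the third attempt
	})
	fmt.Println(attempts, err) // 3 <nil>
}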