From 2a08ce5c68a3ad47daa0c3be597adc810b3730a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Maciej=20Skocze=C5=84?=
Date: Mon, 16 Sep 2024 10:33:05 +0000
Subject: [PATCH] Add scheduler_perf test case for AssignedPodDelete event handling

---
 .../poddelete-pod-blocker-affinity.yaml       |  12 ++
 ...-pod-blocker-topology-ports-resources.yaml |  27 ++++
 .../poddelete-pod-interpodaffinity.yaml       |  19 +++
 .../poddelete-pod-nodeports.yaml              |  13 ++
 .../poddelete-pod-noderesources.yaml          |  14 +++
 .../poddelete-pod-nodevolumelimits.yaml       |  10 ++
 .../poddelete-pod-podtopologyspread.yaml      |  19 +++
 .../poddelete-pod-volumerestrictions.yaml     |  14 +++
 .../config/performance-config.yaml            | 115 ++++++++++++++++++
 .../config/templates/pvc-once-pod.yaml        |  11 ++
 test/integration/scheduler_perf/create.go     |   5 +-
 test/utils/runners.go                         |   4 +
 12 files changed, 262 insertions(+), 1 deletion(-)
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-affinity.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-topology-ports-resources.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-interpodaffinity.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodeports.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-noderesources.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodevolumelimits.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-podtopologyspread.yaml
 create mode 100644 test/integration/scheduler_perf/config/event_handling/poddelete-pod-volumerestrictions.yaml
 create mode 100644 test/integration/scheduler_perf/config/templates/pvc-once-pod.yaml

diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-affinity.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-affinity.yaml
new file mode 100644
index 00000000000..27b92e2d4c2
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-affinity.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-blocker-affinity-
+  labels:
+    color: green
+    type: blocker
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
+  terminationGracePeriodSeconds: 0
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-topology-ports-resources.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-topology-ports-resources.yaml
new file mode 100644
index 00000000000..e6268f39228
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-blocker-topology-ports-resources.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-blocker-topology-
+  labels:
+    topology: blue
+    type: blocker
+spec:
+  topologySpreadConstraints:
+  - maxSkew: 10
+    minDomains: 10000
+    topologyKey: kubernetes.io/hostname
+    whenUnsatisfiable: DoNotSchedule
+    labelSelector:
+      matchLabels:
+        topology: blue
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
+    ports:
+    - hostPort: 8{{ mod .Index 12 }}
+      containerPort: 8{{ mod .Index 12 }}
+    resources:
+      requests:
+        cpu: 0.35
+        memory: 3Gi
+  terminationGracePeriodSeconds: 0
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-interpodaffinity.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-interpodaffinity.yaml
new file mode 100644
index 00000000000..47ee55fb5f5
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-interpodaffinity.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    color: green
+    type: unsched
+spec:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchLabels:
+            color: green
+        topologyKey: kubernetes.io/hostname
+        namespaces: ["blockeraffinity"]
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodeports.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodeports.yaml
new file mode 100644
index 00000000000..f87e4851958
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodeports.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    type: unsched
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
+    ports:
+    - hostPort: 8{{ mod .Index 12 }}
+      containerPort: 8{{ mod .Index 12 }}
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-noderesources.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-noderesources.yaml
new file mode 100644
index 00000000000..b8453f6356a
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-noderesources.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    type: unsched
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
+    resources:
+      requests:
+        cpu: 0.2
+        memory: 1Gi
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodevolumelimits.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodevolumelimits.yaml
new file mode 100644
index 00000000000..51529b43a50
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-nodevolumelimits.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    type: unsched
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-podtopologyspread.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-podtopologyspread.yaml
new file mode 100644
index 00000000000..62748d00f12
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-podtopologyspread.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    topology: blue
+    type: unsched
+spec:
+  topologySpreadConstraints:
+  - maxSkew: 11
+    minDomains: 10000
+    topologyKey: kubernetes.io/hostname
+    whenUnsatisfiable: DoNotSchedule
+    labelSelector:
+      matchLabels:
+        topology: blue
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
diff --git a/test/integration/scheduler_perf/config/event_handling/poddelete-pod-volumerestrictions.yaml b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-volumerestrictions.yaml
new file mode 100644
index 00000000000..a0558e41ab2
--- /dev/null
+++ b/test/integration/scheduler_perf/config/event_handling/poddelete-pod-volumerestrictions.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-unsched-
+  labels:
+    type: unsched
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.10
+    name: pause
+  volumes:
+  - name: vol
+    persistentVolumeClaim:
+      claimName: pvc-{{ .Index }}
diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml
index dd67ca05251..b1ec9e4bcdd 100644
--- a/test/integration/scheduler_perf/config/performance-config.yaml
+++ b/test/integration/scheduler_perf/config/performance-config.yaml
@@ -1548,3 +1548,118 @@
       initNodes: 1000
       deletingPods: 1000
       measurePods: 1000
+
+# This test case measures the performance of queueing hints when handling AssignedPodDelete events.
+# First, two groups of blocker pods are created, which will prevent other pods from being scheduled.
+# Then multiple types of pods are created, and each group is filtered out by a different plugin.
+# Next, the blocker pods are gradually deleted, allowing the previously unschedulable pods to be scheduled.
+# Plugins covered: InterPodAffinity, NodePorts, NodeResources, NodeVolumeLimits, PodTopologySpread and VolumeRestrictions.
+- name: EventHandlingPodDelete
+  featureGates:
+    SchedulerQueueingHints: true
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $initNodes
+    nodeTemplatePath: config/templates/node-default.yaml
+    # Allow max 20 volumes per node.
+    nodeAllocatableStrategy:
+      nodeAllocatable:
+        attachable-volumes-csi-ebs.csi.aws.com: "20"
+      csiNodeAllocatable:
+        ebs.csi.aws.com:
+          count: 20
+  # Create pods that will block other pods from being scheduled.
+  # They'll block using the NodePorts, NodeResources, NodeVolumeLimits and PodTopologySpread plugins.
+  - opcode: createPods
+    countParam: $blockerPods
+    podTemplatePath: config/event_handling/poddelete-pod-blocker-topology-ports-resources.yaml
+    persistentVolumeTemplatePath: config/templates/pv-csi.yaml
+    persistentVolumeClaimTemplatePath: config/templates/pvc.yaml
+    namespace: blockertopologyportsresources
+  # Create a second group of pods that will block other pods from being scheduled.
+  # They'll block using the InterPodAffinity and VolumeRestrictions plugins.
+  - opcode: createPods
+    countParam: $blockerPods
+    podTemplatePath: config/event_handling/poddelete-pod-blocker-affinity.yaml
+    persistentVolumeTemplatePath: config/templates/pv-csi.yaml
+    persistentVolumeClaimTemplatePath: config/templates/pvc-once-pod.yaml
+    namespace: blockeraffinity
+  # Collect metrics from all createPods ops below.
+  - opcode: startCollectingMetrics
+    name: unschedPods
+    namespaces: [blockertopologyportsresources, blockeraffinity, nodeports, noderesources, nodevolumelimits, interpodaffinity]
+    labelSelector:
+      type: unsched
+  # Create pods blocked using the PodTopologySpread plugin.
+  # Note: for this plugin, the namespace has to match the blockers' namespace,
+  # so it has to be "blockertopologyportsresources".
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/event_handling/poddelete-pod-podtopologyspread.yaml
+    skipWaitToCompletion: true
+    namespace: blockertopologyportsresources
+  # Create pods blocked using the VolumeRestrictions plugin.
+  # Note: these pods use the PVCs and PVs created for the second group of blocker pods,
+  # so the count needs to be equal to $blockerPods
+  # and the namespace has to be "blockeraffinity".
+  - opcode: createPods
+    countParam: $blockerPods
+    podTemplatePath: config/event_handling/poddelete-pod-volumerestrictions.yaml
+    skipWaitToCompletion: true
+    namespace: blockeraffinity
+  # Create pods blocked using the NodePorts plugin.
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/event_handling/poddelete-pod-nodeports.yaml
+    skipWaitToCompletion: true
+    namespace: nodeports
+  # Create pods blocked using the NodeResources plugin.
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/event_handling/poddelete-pod-noderesources.yaml
+    skipWaitToCompletion: true
+    namespace: noderesources
+  # Create pods blocked using the NodeVolumeLimits plugin.
+  - opcode: createPods
+    countParam: $blockerPods
+    podTemplatePath: config/event_handling/poddelete-pod-nodevolumelimits.yaml
+    persistentVolumeTemplatePath: config/templates/pv-csi.yaml
+    persistentVolumeClaimTemplatePath: config/templates/pvc.yaml
+    skipWaitToCompletion: true
+    namespace: nodevolumelimits
+  # Create pods blocked using the InterPodAffinity plugin.
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/event_handling/poddelete-pod-interpodaffinity.yaml
+    skipWaitToCompletion: true
+    namespace: interpodaffinity
+  # Wait for unschedulable pods to be processed by the scheduler.
+  - opcode: barrier
+    stageRequirement: Attempted
+    labelSelector:
+      type: unsched
+  # Start deleting blocker pods.
+  - opcode: deletePods
+    deletePodsPerSecond: 100
+    namespace: blockertopologyportsresources
+    labelSelector:
+      type: blocker
+    skipWaitToCompletion: true
+  - opcode: deletePods
+    deletePodsPerSecond: 100
+    namespace: blockeraffinity
+    labelSelector:
+      type: blocker
+    skipWaitToCompletion: true
+  # Wait for previously unschedulable pods to be scheduled.
+  - opcode: barrier
+    labelSelector:
+      type: unsched
+  - opcode: stopCollectingMetrics
+  workloads:
+  - name: 50Nodes_500Pods
+    labels: [performance, short]
+    params:
+      initNodes: 50
+      blockerPods: 480 # Must be slightly below initNodes * 10 to be stable
+      measurePods: 500 # Must be initNodes * 10
diff --git a/test/integration/scheduler_perf/config/templates/pvc-once-pod.yaml b/test/integration/scheduler_perf/config/templates/pvc-once-pod.yaml
new file mode 100644
index 00000000000..169a6724f73
--- /dev/null
+++ b/test/integration/scheduler_perf/config/templates/pvc-once-pod.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  annotations:
+    pv.kubernetes.io/bind-completed: "true"
+spec:
+  accessModes:
+  - ReadWriteOncePod
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/test/integration/scheduler_perf/create.go b/test/integration/scheduler_perf/create.go
index d44816ec8b7..0b279d57487 100644
--- a/test/integration/scheduler_perf/create.go
+++ b/test/integration/scheduler_perf/create.go
@@ -155,7 +155,10 @@ func getSpecFromTextTemplateFile(path string, env map[string]any, spec interface
 	fm := template.FuncMap{"div": func(a, b int) int {
 		return a / b
 	}}
-	tmpl, err := template.New("object").Funcs(fm).Parse(string(content))
+	modFn := template.FuncMap{"mod": func(a, b int) int {
+		return a % b
+	}}
+	tmpl, err := template.New("object").Funcs(fm).Funcs(modFn).Parse(string(content))
 	if err != nil {
 		return err
 	}
diff --git a/test/utils/runners.go b/test/utils/runners.go
index 35c002950c7..29a66bf2fe7 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -1194,6 +1194,10 @@ func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interfa
 		// PVs are cluster-wide resources.
 		// Prepend a namespace to make the name globally unique.
 		pv.Name = fmt.Sprintf("%s-%s", namespace, pv.Name)
+		pvs := pv.Spec.PersistentVolumeSource
+		if pvs.CSI != nil {
+			pvs.CSI.VolumeHandle = pv.Name
+		}
 		if bindVolume {
 			// bind pv to "pvc-$i"
 			pv.Spec.ClaimRef = &v1.ObjectReference{
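
The pod templates above rely on the new "mod" helper registered in create.go (e.g. hostPort: 8{{ mod .Index 12 }}), so consecutive pod indices cycle through at most twelve host ports. Below is a minimal standalone Go sketch, not part of this patch, showing how such a snippet renders with text/template; the map-based data is only a stand-in for the harness's real template environment.

package main

import (
	"os"
	"text/template"
)

func main() {
	// Register "mod" the same way getSpecFromTextTemplateFile does in this patch.
	fm := template.FuncMap{"mod": func(a, b int) int { return a % b }}

	// Snippet taken from poddelete-pod-nodeports.yaml; ports wrap every 12 pods.
	tmpl := template.Must(template.New("pod").Funcs(fm).Parse(
		"hostPort: 8{{ mod .Index 12 }}, containerPort: 8{{ mod .Index 12 }}\n"))

	for _, idx := range []int{0, 5, 11, 12} {
		// Index 12 renders the same ports as index 0, so blocker and unschedulable
		// pods created with matching indices contend for the same host port.
		if err := tmpl.Execute(os.Stdout, map[string]int{"Index": idx}); err != nil {
			panic(err)
		}
	}
}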