Merge pull request #80939 from draveness/feature/use-workqueue-in-volume-binding
fix: use workqueue to speed up volume binding tests
This commit is contained in:
commit 01dc5b1da3
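The diff below swaps the test's serial create-and-validate loops for parallel ones driven by workqueue.ParallelizeUntil from k8s.io/client-go/util/workqueue. As a minimal, self-contained sketch of how that helper is used (the 16-worker count mirrors the diff; the piece count, result slice, and per-piece work are illustrative only, not taken from the test):

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Hypothetical workload: 150 independent pieces of work.
	const pieces = 150
	results := make([]string, pieces)

	// Run 16 workers, the same worker count the test changes use.
	// The work function is invoked once per index in [0, pieces).
	workqueue.ParallelizeUntil(context.TODO(), 16, pieces, func(i int) {
		// Each piece writes only to its own slot, so concurrent
		// workers never contend on shared state.
		results[i] = fmt.Sprintf("object-%d", i)
	})

	fmt.Printf("created %d objects\n", len(results))
}

Pre-sizing the slice and assigning by index, as the diff also does in place of append, is what lets the workers run without any extra locking.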
@@ -32,6 +32,7 @@ go_test(
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils/image:go_default_library",
@@ -19,6 +19,7 @@ package volumescheduling
 // This file tests the VolumeScheduling feature.
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"strconv"
@@ -38,6 +39,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/util/workqueue"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
 	persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
@@ -77,8 +79,8 @@ var (
 const (
 	node1                 = "node-1"
 	node2                 = "node-2"
-	podLimit              = 100
-	volsPerPod            = 5
+	podLimit              = 50
+	volsPerPod            = 3
 	nodeAffinityLabelKey  = "kubernetes.io/hostname"
 	provisionerPluginName = "kubernetes.io/mock-provisioner"
 )
@@ -406,7 +408,7 @@ func TestVolumeBindingDynamicStressFast(t *testing.T) {
 
 // Like TestVolumeBindingStress but with slow dynamic provisioning
 func TestVolumeBindingDynamicStressSlow(t *testing.T) {
-	testVolumeBindingStress(t, 0, true, 30)
+	testVolumeBindingStress(t, 0, true, 10)
 }
 
 func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
@@ -430,10 +432,12 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 		}
 	}
 
+	klog.Infof("Start creating PVs and PVCs")
 	// Create enough PVs and PVCs for all the pods
-	pvs := []*v1.PersistentVolume{}
-	pvcs := []*v1.PersistentVolumeClaim{}
-	for i := 0; i < podLimit*volsPerPod; i++ {
+	podVolumesCount := podLimit * volsPerPod
+	pvs := make([]*v1.PersistentVolume, podVolumesCount)
+	pvcs := make([]*v1.PersistentVolumeClaim, podVolumesCount)
+	workqueue.ParallelizeUntil(context.TODO(), 16, podVolumesCount, func(i int) {
 		var (
 			pv  *v1.PersistentVolume
 			pvc *v1.PersistentVolumeClaim
@@ -452,7 +456,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 			if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 				t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 			}
-			pvs = append(pvs, pv)
+			pvs[i] = pv
 		}
 		if pv != nil && pv.Spec.ClaimRef != nil && pv.Spec.ClaimRef.Name == pvcName {
 			pvc = makePVC(pvcName, config.ns, &classImmediate, pv.Name)
@@ -462,11 +466,12 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 		if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
 			t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
 		}
-		pvcs = append(pvcs, pvc)
-	}
+		pvcs[i] = pvc
+	})
 
-	pods := []*v1.Pod{}
-	for i := 0; i < podLimit; i++ {
+	klog.Infof("Start creating Pods")
+	pods := make([]*v1.Pod, podLimit)
+	workqueue.ParallelizeUntil(context.TODO(), 16, podLimit, func(i int) {
 		// Generate string of all the PVCs for the pod
 		podPvcs := []string{}
 		for j := i * volsPerPod; j < (i+1)*volsPerPod; j++ {
@@ -477,24 +482,32 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 		if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
 			t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
 		}
-		pods = append(pods, pod)
-	}
+		pods[i] = pod
+	})
 
+	klog.Infof("Start validating pod scheduled")
 	// Validate Pods scheduled
-	for _, pod := range pods {
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(pods), func(i int) {
+		pod := pods[i]
 		// Use increased timeout for stress test because there is a higher chance of
 		// PV sync error
 		if err := waitForPodToScheduleWithTimeout(config.client, pod, 2*time.Minute); err != nil {
 			t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
 		}
-	}
+	})
 
+	klog.Infof("Start validating PVCs scheduled")
 	// Validate PVC/PV binding
-	for _, pvc := range pvcs {
-		validatePVCPhase(t, config.client, pvc.Name, config.ns, v1.ClaimBound, dynamic)
-	}
-	for _, pv := range pvs {
-		validatePVPhase(t, config.client, pv.Name, v1.VolumeBound)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(pvcs), func(i int) {
+		validatePVCPhase(t, config.client, pvcs[i].Name, config.ns, v1.ClaimBound, dynamic)
+	})
+
+	// Don't validate pv for dynamic provisioning test
+	if !dynamic {
+		klog.Infof("Start validating PVs scheduled")
+		workqueue.ParallelizeUntil(context.TODO(), 16, len(pvs), func(i int) {
+			validatePVPhase(t, config.client, pvs[i].Name, v1.VolumeBound)
		})
 	}
 }
 
@@ -916,6 +929,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 		ns:   ns,
 		stop: context.stopCh,
 		teardown: func() {
+			klog.Infof("test cluster %q start to tear down", ns)
 			deleteTestObjects(clientset, ns, nil)
 			cleanupTest(t, context)
 		},