diff --git a/test/integration/volumescheduling/BUILD b/test/integration/volumescheduling/BUILD
index 9df99b06142..ffa88937d3e 100644
--- a/test/integration/volumescheduling/BUILD
+++ b/test/integration/volumescheduling/BUILD
@@ -17,7 +17,6 @@ go_test(
     tags = ["integration"],
     deps = [
         "//pkg/controller/volume/persistentvolume:go_default_library",
-        "//pkg/controller/volume/persistentvolume/options:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/testing:go_default_library",
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index f0e7e840cfd..8f9da448423 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -40,7 +40,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
-	persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -216,6 +215,20 @@ func TestVolumeBinding(t *testing.T) {
 			}
 		}
 
+		// Wait for PVs to become available to avoid race condition in PV controller
+		// https://github.com/kubernetes/kubernetes/issues/85320
+		for _, pvConfig := range test.pvs {
+			if err := waitForPVPhase(config.client, pvConfig.name, v1.VolumeAvailable); err != nil {
+				t.Fatalf("PersistentVolume %q failed to become available: %v", pvConfig.name, err)
+			}
+		}
+
+		for _, pvConfig := range test.unboundPvs {
+			if err := waitForPVPhase(config.client, pvConfig.name, v1.VolumeAvailable); err != nil {
+				t.Fatalf("PersistentVolume %q failed to become available: %v", pvConfig.name, err)
+			}
+		}
+
 		// Create PVCs
 		for _, pvcConfig := range test.pvcs {
 			pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV)
@@ -926,7 +939,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 
 func initPVController(t *testing.T, context *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
 	clientset := context.clientSet
-	// Informers factory for controllers, we disable resync period for testing.
+	// Informers factory for controllers
 	informerFactory := informers.NewSharedInformerFactory(clientset, 0)
 
 	// Start PV controller for volume binding.
@@ -946,10 +959,11 @@ func initPVController(t *testing.T, context *testContext, provisionDelaySeconds
 	}
 	plugins := []volume.VolumePlugin{plugin}
 
-	controllerOptions := persistentvolumeoptions.NewPersistentVolumeControllerOptions()
 	params := persistentvolume.ControllerParameters{
-		KubeClient:                clientset,
-		SyncPeriod:                controllerOptions.PVClaimBinderSyncPeriod,
+		KubeClient: clientset,
+		// Use a frequent resync period to retry API update conflicts due to
+		// https://github.com/kubernetes/kubernetes/issues/85320
+		SyncPeriod:                5 * time.Second,
 		VolumePlugins:             plugins,
 		Cloud:                     nil,
 		ClusterName:               "volume-test-cluster",
@@ -1180,6 +1194,20 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pvName string, ph
 	}
 }
 
+func waitForPVPhase(client clientset.Interface, pvName string, phase v1.PersistentVolumePhase) error {
+	return wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
+		pv, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		if pv.Status.Phase == phase {
+			return true, nil
+		}
+		return false, nil
+	})
+}
+
 func waitForPVCBound(client clientset.Interface, pvc *v1.PersistentVolumeClaim) error {
 	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
 		claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})