Skip parallel volume cloning test for GCE PD and fix "disk not ready" error for GCE PD

Alexis MacAskill 2021-11-10 18:50:37 +00:00
parent c1153d3353
commit 8102bbe05a
4 changed files with 87 additions and 49 deletions


@@ -26,6 +26,7 @@ const (
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
dataSourceProvisionTimeout = 5 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
@@ -56,6 +57,9 @@ type TimeoutContext struct {
// ClaimProvision is how long claims have to become dynamically provisioned.
ClaimProvision time.Duration
// DataSourceProvision is how long claims have to become dynamically provisioned from the source claim.
DataSourceProvision time.Duration
// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
ClaimProvisionShort time.Duration
@@ -96,6 +100,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
DataSourceProvision: dataSourceProvisionTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
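For context, the sketch below is not part of this commit and the pvc, cs, and e2epv names are assumed; it shows how a test might consume the new DataSourceProvision timeout through the framework's TimeoutContext:
// Hypothetical usage sketch: wait for a claim cloned from a data source to bind,
// using the new DataSourceProvision timeout instead of the generic ClaimProvision.
// Assumes cs clientset.Interface, pvc *v1.PersistentVolumeClaim, and the usual
// e2epv alias for k8s.io/kubernetes/test/e2e/framework/pv.
timeouts := framework.NewTimeoutContextWithDefaults()
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, timeouts.DataSourceProvision)
framework.ExpectNoError(err, "claim cloned from the data source was not provisioned within %v", timeouts.DataSourceProvision)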


@@ -41,6 +41,7 @@ package volume
import (
"context"
"crypto/sha256"
"fmt"
"path/filepath"
"strconv"
@@ -51,6 +52,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
@@ -235,6 +237,73 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod
return pod, ip
}
// GetVolumeAttachmentName returns the name of the VolumeAttachment for the PV that is bound to the PVC with the
// passed-in claimName and claimNamespace: a SHA-256 hash of the PV's volume handle, the provisioner, and the node name.
func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
var nodeName string
// For provisioning tests, ClientNodeSelection is not set, so we do not know the NodeName of the VolumeAttachment of the PV that is
// bound to the PVC with the passed-in claimName and claimNamespace. We need this NodeName because it is used to generate the
// attachmentName that is returned and is used to look up a certain VolumeAttachment in WaitForVolumeAttachmentTerminated.
// To get the nodeName of the VolumeAttachment, we list all VolumeAttachments, find the one whose
// PersistentVolumeName matches the PV bound to the passed-in PVC, and take the NodeName from that VolumeAttachment.
if config.ClientNodeSelection.Name == "" {
claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
pvName := claim.Spec.VolumeName
volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
for _, volumeAttachment := range volumeAttachments.Items {
if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
nodeName = volumeAttachment.Spec.NodeName
break
}
}
} else {
nodeName = config.ClientNodeSelection.Name
}
handle := getVolumeHandle(cs, claimName, claimNamespace)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
return fmt.Sprintf("csi-%x", attachmentHash)
}
// getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace string) string {
// re-get the claim to the latest state with bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting volume attachment %v to terminate: %v", attachmentName, waitErr)
}
return nil
}
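A minimal usage sketch for the two helpers added above (the cs, testConfig, provisioner, and claim variables are placeholders, not part of this commit): derive the attachment name for a bound claim, then block until the attacher has removed it.
// Hypothetical sketch: wait for the bound claim's VolumeAttachment to be torn down,
// e.g. before reusing the disk as a clone source.
attachmentName := GetVolumeAttachmentName(cs, testConfig, provisioner, claim.Name, claim.Namespace)
if err := WaitForVolumeAttachmentTerminated(attachmentName, cs, 5*time.Minute); err != nil {
framework.Failf("VolumeAttachment %s was not terminated: %v", attachmentName, err)
}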
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.


@@ -18,7 +18,6 @@ package storage
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"math/rand"
@@ -374,9 +373,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
framework.ExpectNoError(err, "Failed to start pod: %v", err)
ginkgo.By("Checking if VolumeAttachment was created for the pod")
handle := getVolumeHandle(m.cs, claim)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
testConfig := storageframework.ConvertTestConfig(m.config)
attachmentName := e2evolume.GetVolumeAttachmentName(m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace)
_, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@@ -425,9 +423,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// VolumeAttachment should be created because the default value for CSI attachable is true
ginkgo.By("Checking if VolumeAttachment was created for the pod")
handle := getVolumeHandle(m.cs, claim)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
testConfig := storageframework.ConvertTestConfig(m.config)
attachmentName := e2evolume.GetVolumeAttachmentName(m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace)
_, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@@ -461,7 +458,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.By(fmt.Sprintf("Wait for the volumeattachment to be deleted up to %v", csiVolumeAttachmentTimeout))
// This step can be slow because we have to wait either for a NodeUpdate event to happen or
// for the detachment of this volume to time out so that we can do a force detach.
err = waitForVolumeAttachmentTerminated(attachmentName, m.cs)
err = e2evolume.WaitForVolumeAttachmentTerminated(attachmentName, m.cs, csiVolumeAttachmentTimeout)
framework.ExpectNoError(err, "Failed to delete VolumeAttachment: %v", err)
})
})
@@ -2084,24 +2081,6 @@ func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
return nil
}
func waitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface) error {
waitErr := wait.PollImmediate(10*time.Second, csiVolumeAttachmentTimeout, func() (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting volume attachment %v to terminate: %v", attachmentName, waitErr)
}
return nil
}
func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Interface) (int32, error) {
var attachLimit int32
@@ -2426,26 +2405,6 @@ func destroyCSIDriver(cs clientset.Interface, driverName string) {
}
}
func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
// re-get the claim to the latest state with bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
func getVolumeLimitFromCSINode(csiNode *storagev1.CSINode, driverName string) int32 {
for _, d := range csiNode.Spec.Drivers {
if d.Name != driverName {


@@ -163,6 +163,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
Claim: l.pvc,
SourceClaim: l.sourcePVC,
Class: l.sc,
Provisioner: l.sc.Provisioner,
ClaimSize: claimSize,
ExpectedSize: claimSize,
VolumeMode: pattern.VolMode,
@@ -254,7 +255,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
l.pvc.Spec.DataSource = dataSource
l.testCase.NodeSelection = testConfig.ClientNodeSelection
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
@@ -269,6 +269,9 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
}
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
l.testCase.TestDynamicProvisioning()
})
@@ -322,6 +325,9 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests)
}
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
t.TestDynamicProvisioning()
}(i)
}
@@ -377,7 +383,6 @@ func SetupStorageClass(
// see #ProvisionStorageClass
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
var err error
client := t.Client
gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
claim := t.Claim
@@ -413,7 +418,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
}
var pod *v1.Pod
pod, err := e2epod.CreateSecPod(client, podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPod(client, podConfig, t.Timeouts.DataSourceProvision)
// Delete pod now, otherwise PV can't be deleted below
framework.ExpectNoError(err)
e2epod.DeletePodOrFail(client, pod.Namespace, pod.Name)