mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 19:31:44 +00:00
Merge pull request #67102 from ddebroy/ebs-e2e1
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.
End2End tests for DynamicVolumeProvisioning of EBS
**What this PR does / why we need it**:
Add end2end tests to exercise `DynamicProvisioningScheduling` features for EBS. The tests make sure `WaitForFirstConsumer` and `AllowedTopologies` specified in a EBS storage class has the desired effect.
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #
**Special notes for your reviewer**:
Tests features added to 217a3d8902
**Release note**:
```
NONE
```
/sig storage
/assign @msau42 @jsafrane
This commit is contained in:
commit
89e57b5051
@ -43,6 +43,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -53,6 +54,7 @@ type storageClassTest struct {
|
||||
cloudProviders []string
|
||||
provisioner string
|
||||
parameters map[string]string
|
||||
delayBinding bool
|
||||
claimSize string
|
||||
expectedSize string
|
||||
pvCheck func(volume *v1.PersistentVolume) error
|
||||
@ -170,6 +172,102 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
|
||||
return pv
|
||||
}
|
||||
|
||||
func testBindingWaitForFirstConsumer(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
|
||||
var err error
|
||||
|
||||
By("creating a storage class " + class.Name)
|
||||
class, err = client.StorageV1().StorageClasses().Create(class)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer deleteStorageClass(client, class.Name)
|
||||
|
||||
By("creating a claim")
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer func() {
|
||||
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace), "Failed to delete PVC ", claim.Name)
|
||||
}()
|
||||
|
||||
// Wait for ClaimProvisionTimeout and make sure the phase did not become Bound i.e. the Wait errors out
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("checking the claim is in pending state")
|
||||
// Get new copy of the claim
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
|
||||
|
||||
By("creating a pod referring to the claim")
|
||||
// Create a pod referring to the claim and wait for it to get to running
|
||||
pod, err := framework.CreateClientPod(client, claim.Namespace, claim)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer func() {
|
||||
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
|
||||
}()
|
||||
|
||||
By("re-checking the claim to see it binded")
|
||||
// Get new copy of the claim
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// make sure claim did bind
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// collect node and pv details
|
||||
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
return pv, node
|
||||
}
|
||||
|
||||
// checkZoneLabelAndAffinity checks the LabelZoneFailureDomain label of PV and terms
|
||||
// with key LabelZoneFailureDomain in PV's node affinity match zone
|
||||
func checkZoneLabelAndAffinity(pv *v1.PersistentVolume, zone string) {
|
||||
By("checking PV's zone label and node affinity terms match expected zone")
|
||||
if pv == nil {
|
||||
framework.Failf("nil pv passed")
|
||||
}
|
||||
pvLabel, ok := pv.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if !ok {
|
||||
framework.Failf("label %s not found on PV", kubeletapis.LabelZoneFailureDomain)
|
||||
}
|
||||
|
||||
if zone != pvLabel {
|
||||
framework.Failf("value of %s label for PV: %s does not match expected zone: %s", kubeletapis.LabelZoneFailureDomain, pvLabel, zone)
|
||||
}
|
||||
|
||||
if pv.Spec.NodeAffinity == nil {
|
||||
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
|
||||
}
|
||||
|
||||
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
|
||||
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
|
||||
}
|
||||
|
||||
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
|
||||
keyFound := false
|
||||
for _, r := range term.MatchExpressions {
|
||||
if r.Key == kubeletapis.LabelZoneFailureDomain {
|
||||
keyFound = true
|
||||
for _, val := range r.Values {
|
||||
if zone == val {
|
||||
framework.Logf("expected zone %s detected", val)
|
||||
} else {
|
||||
framework.Failf("zone %s does not match expected zone %s", val, zone)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !keyFound {
|
||||
framework.Failf("label %s not found in term %v", kubeletapis.LabelZoneFailureDomain, term)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkAWSEBS checks properties of an AWS EBS. Test framework does not
|
||||
// instantiate full AWS provider, therefore we need use ec2 API directly.
|
||||
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
|
||||
@ -874,6 +972,85 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
// Delayed binding: with a WaitForFirstConsumer class the PV must only be
// provisioned after a pod consumes the claim, and must then carry the same
// zone as the node that ran the pod. AWS/EBS only.
Describe("DynamicProvisioner delayed binding [Feature:DynamicProvisioningScheduling] [Slow]", func() {
	It("should create persistent volume in the same zone as node after a pod mounting the claim is started", func() {
		// EBS-specific behavior; skip on every other provider.
		framework.SkipUnlessProviderIs("aws")

		By("creating a claim with class with waitForFirstConsumer")
		test := storageClassTest{
			name:         "Delayed binding EBS storage class test",
			provisioner:  "kubernetes.io/aws-ebs",
			claimSize:    "2Gi",
			delayBinding: true, // newStorageClass turns this into WaitForFirstConsumer
		}
		suffix := "delayed-ebs"
		class := newStorageClass(test, ns, suffix)
		claim := newClaim(test, ns, suffix)
		claim.Spec.StorageClassName = &class.Name
		// Helper creates class/claim/pod and returns the provisioned PV plus
		// the node the consumer pod landed on.
		pv, node := testBindingWaitForFirstConsumer(c, claim, class)
		if node == nil {
			framework.Failf("unexpected nil node found")
		}
		// The PV's zone must equal the zone of the scheduled node.
		zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
		if !ok {
			framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
		}
		checkZoneLabelAndAffinity(pv, zone)
	})
})
|
||||
// AllowedTopologies with immediate binding: the PV must be provisioned in
// exactly the single zone named in the storage class's topology term.
Describe("DynamicProvisioner allowedTopology [Feature:DynamicProvisioningScheduling]", func() {
	It("should create persistent volume in the zone specified in allowedTopology of storageclass", func() {
		// EBS-specific behavior; skip on every other provider.
		framework.SkipUnlessProviderIs("aws")

		By("creating a claim with class with allowedTopology set")
		test := storageClassTest{
			name:         "Delayed binding EBS storage class test",
			provisioner:  "kubernetes.io/aws-ebs",
			claimSize:    "2Gi",
			expectedSize: "2Gi",
		}
		suffix := "topo-ebs"
		class := newStorageClass(test, ns, suffix)
		// Restrict the class to one randomly chosen zone of the cluster.
		zone := getRandomCloudZone(c)
		addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
		claim := newClaim(test, ns, suffix)
		claim.Spec.StorageClassName = &class.Name
		// Immediate-binding provisioning path; returns the bound PV.
		pv := testDynamicProvisioning(test, c, claim, class)
		// PV label and node affinity must both name the allowed zone.
		checkZoneLabelAndAffinity(pv, zone)
	})
})
|
||||
// Delayed binding combined with AllowedTopologies: provisioning must wait for
// a consumer pod AND the node chosen for that pod (and hence the PV) must be
// in the single allowed zone.
Describe("DynamicProvisioner delayed binding with allowedTopology [Feature:DynamicProvisioningScheduling] [Slow]", func() {
	It("should create persistent volume in the same zone as specified in allowedTopology after a pod mounting the claim is started", func() {
		// EBS-specific behavior; skip on every other provider.
		framework.SkipUnlessProviderIs("aws")

		By("creating a claim with class with waitForFirstConsumer")
		test := storageClassTest{
			name:         "Delayed binding EBS storage class test",
			provisioner:  "kubernetes.io/aws-ebs",
			claimSize:    "2Gi",
			delayBinding: true, // newStorageClass turns this into WaitForFirstConsumer
		}
		suffix := "delayed-topo-ebs"
		class := newStorageClass(test, ns, suffix)
		// Restrict the class to one randomly chosen zone of the cluster.
		topoZone := getRandomCloudZone(c)
		addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
		claim := newClaim(test, ns, suffix)
		claim.Spec.StorageClassName = &class.Name
		// Helper creates class/claim/pod and returns the provisioned PV plus
		// the node the consumer pod landed on.
		pv, node := testBindingWaitForFirstConsumer(c, claim, class)
		if node == nil {
			framework.Failf("unexpected nil node found")
		}
		nodeZone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
		if !ok {
			framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
		}
		// The scheduler must have honored the topology restriction.
		if topoZone != nodeZone {
			framework.Failf("zone specified in AllowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, nodeZone)
		}
		checkZoneLabelAndAffinity(pv, topoZone)
	})
})
|
||||
|
||||
})
|
||||
|
||||
func getDefaultStorageClassName(c clientset.Interface) string {
|
||||
@ -1016,6 +1193,18 @@ func getDefaultPluginName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storage.StorageClass, zone string) {
|
||||
term := v1.TopologySelectorTerm{
|
||||
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
|
||||
{
|
||||
Key: kubeletapis.LabelZoneFailureDomain,
|
||||
Values: []string{zone},
|
||||
},
|
||||
},
|
||||
}
|
||||
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
|
||||
}
|
||||
|
||||
func newStorageClass(t storageClassTest, ns string, suffix string) *storage.StorageClass {
|
||||
pluginName := t.provisioner
|
||||
if pluginName == "" {
|
||||
@ -1024,6 +1213,10 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
|
||||
if suffix == "" {
|
||||
suffix = "sc"
|
||||
}
|
||||
bindingMode := storage.VolumeBindingImmediate
|
||||
if t.delayBinding {
|
||||
bindingMode = storage.VolumeBindingWaitForFirstConsumer
|
||||
}
|
||||
return &storage.StorageClass{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
@ -1032,8 +1225,9 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
|
||||
// Name must be unique, so let's base it on namespace name
|
||||
Name: ns + "-" + suffix,
|
||||
},
|
||||
Provisioner: pluginName,
|
||||
Parameters: t.parameters,
|
||||
Provisioner: pluginName,
|
||||
Parameters: t.parameters,
|
||||
VolumeBindingMode: &bindingMode,
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user