Merge pull request #35284 from jsafrane/fix-class-tests

Automatic merge from submit-queue

Add test for provisioning with storage class

This PR re-introduces e2e test for dynamic provisioning with storage classes.

It adds the same test that was merged in PR #32485, with an extra patch adding the region to AWS calls. It works well on my AWS setup; however, I'm using a shared company account, so I can't run kube-up.sh and run the tests in the "official" way.

@zmerlynn, can you please try to run tests that led to #34961?

@justinsb, you're my AWS guru — is there a way to introduce a fully initialized AWS cloud provider into the e2e test framework? It would simplify everything. GCE has it there, but it's easier to initialize, I guess. See https://github.com/kubernetes/kubernetes/blob/master/test/e2e/pd.go#L486 for example - IMO tests should not talk to AWS directly.
This commit is contained in:
Kubernetes Submit Queue 2017-04-07 11:40:34 -07:00 committed by GitHub
commit de1cee38bf
6 changed files with 409 additions and 144 deletions

View File

@ -133,6 +133,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
--provider="${KUBERNETES_PROVIDER}" \
--gce-project="${PROJECT:-}" \
--gce-zone="${ZONE:-}" \
--gce-multizone="${MULTIZONE:-false}" \
--gke-cluster="${CLUSTER_NAME:-}" \
--kube-master="${KUBE_MASTER:-}" \
--cluster-tag="${CLUSTER_ID:-}" \

View File

@ -281,6 +281,7 @@ garbage-collector-enabled
gather-logs-sizes
gather-metrics-at-teardown
gather-resource-usage
gce-multizone
gce-project
gce-service-account
gce-upgrade-script

View File

@ -77,10 +77,11 @@ type Disks interface {
// GCECloud implements Disks.
var _ Disks = (*GCECloud)(nil)
type gceDisk struct {
type GCEDisk struct {
Zone string
Name string
Kind string
Type string
}
func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
@ -259,7 +260,7 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error {
// If zone is specified, the volume will only be found in the specified zone,
// otherwise all managed zones will be searched.
func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
var disk *gceDisk
var disk *GCEDisk
var err error
if zone == "" {
// We would like as far as possible to avoid this case,
@ -268,7 +269,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
// by name, so we have to continue to support that.
// However, wherever possible the zone should be passed (and it is passed
// for most cases that we can control, e.g. dynamic volume provisioning)
disk, err = gce.getDiskByNameUnknownZone(name)
disk, err = gce.GetDiskByNameUnknownZone(name)
if err != nil {
return nil, err
}
@ -300,16 +301,17 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
return labels, nil
}
// Returns a gceDisk for the disk, if it is found in the specified zone.
// Returns a GCEDisk for the disk, if it is found in the specified zone.
// If not found, returns (nil, nil)
func (gce *GCECloud) findDiskByName(diskName string, zone string) (*gceDisk, error) {
func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) {
dc := contextWithNamespace(diskName, "gce_list_disk")
disk, err := gce.service.Disks.Get(gce.projectID, zone, diskName).Context(dc).Do()
if err == nil {
d := &gceDisk{
d := &GCEDisk{
Zone: lastComponent(disk.Zone),
Name: disk.Name,
Kind: disk.Kind,
Type: disk.Type,
}
return d, nil
}
@ -320,7 +322,7 @@ func (gce *GCECloud) findDiskByName(diskName string, zone string) (*gceDisk, err
}
// Like findDiskByName, but returns an error if the disk is not found
func (gce *GCECloud) getDiskByName(diskName string, zone string) (*gceDisk, error) {
func (gce *GCECloud) getDiskByName(diskName string, zone string) (*GCEDisk, error) {
disk, err := gce.findDiskByName(diskName, zone)
if disk == nil && err == nil {
return nil, fmt.Errorf("GCE persistent disk not found: diskName=%q zone=%q", diskName, zone)
@ -331,7 +333,7 @@ func (gce *GCECloud) getDiskByName(diskName string, zone string) (*gceDisk, erro
// Scans all managed zones to return the GCE PD
// Prefer getDiskByName, if the zone can be established
// Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone
func (gce *GCECloud) getDiskByNameUnknownZone(diskName string) (*gceDisk, error) {
func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) {
// Note: this is the gotcha right now with GCE PD support:
// disk names are not unique per-region.
// (I can create two volumes with name "myvol" in e.g. us-central1-b & us-central1-f)
@ -342,7 +344,7 @@ func (gce *GCECloud) getDiskByNameUnknownZone(diskName string) (*gceDisk, error)
// admission control, but that might be a little weird (values changing
// on create)
var found *gceDisk
var found *GCEDisk
for _, zone := range gce.managedZones {
disk, err := gce.findDiskByName(diskName, zone)
if err != nil {
@ -383,7 +385,7 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) {
}
func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
disk, err := gce.getDiskByNameUnknownZone(diskToDelete)
disk, err := gce.GetDiskByNameUnknownZone(diskToDelete)
if err != nil {
return err
}
@ -398,7 +400,7 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
}
// Converts a Disk resource to an AttachedDisk resource.
func (gce *GCECloud) convertDiskToAttachedDisk(disk *gceDisk, readWrite string) *compute.AttachedDisk {
func (gce *GCECloud) convertDiskToAttachedDisk(disk *GCEDisk, readWrite string) *compute.AttachedDisk {
return &compute.AttachedDisk{
DeviceName: disk.Name,
Kind: disk.Kind,

View File

@ -72,7 +72,10 @@ func setupProviderConfig() error {
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
managedZones := []string{zone} // Only single-zone for now
managedZones := []string{} // Manage all zones in the region
if !framework.TestContext.CloudConfig.MultiZone {
managedZones = []string{zone}
}
cloudConfig.Provider, err = gcecloud.CreateGCECloud(framework.TestContext.CloudConfig.ProjectID, region, zone, managedZones, "" /* networkUrl */, nil /* nodeTags */, "" /* nodeInstancePerfix */, nil /* tokenSource */, false /* useMetadataServer */)
if err != nil {
return fmt.Errorf("Error building GCE/GKE provider: %v", err)

View File

@ -129,6 +129,7 @@ type NodeTestContextType struct {
type CloudConfig struct {
ProjectID string
Zone string
MultiZone bool
Cluster string
MasterName string
NodeInstanceGroup string
@ -194,6 +195,7 @@ func RegisterClusterFlags() {
flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")

View File

@ -18,15 +18,22 @@ package e2e
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/api/v1"
@ -38,42 +45,75 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
type storageClassTest struct {
name string
cloudProviders []string
provisioner string
parameters map[string]string
claimSize string
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
}
const (
// Requested size of the volume
requestedSize = "1500Mi"
// Plugin name of the external provisioner
externalPluginName = "example.com/nfs"
)
func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, expectedSize string) {
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) {
var err error
if class != nil {
By("creating a StorageClass " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
client.StorageV1().StorageClasses().Delete(class.Name, nil)
}()
}
By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %s/%s", claim.Namespace, claim.Name)
client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get new copy of the claim
claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Check sizes
expectedCapacity := resource.MustParse(expectedSize)
expectedCapacity := resource.MustParse(t.expectedSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
requestedCapacity := resource.MustParse(requestedSize)
requestedCapacity := resource.MustParse(t.claimSize)
claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
// Check PV properties
By("checking the PV")
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
// Run the checker
if t.pvCheck != nil {
err = t.pvCheck(pv)
Expect(err).NotTo(HaveOccurred())
}
// We start two pods:
// - The first writes 'hello world' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
@ -86,15 +126,84 @@ func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVol
runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
By("deleting the claim")
framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted. Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because
// kubelet is slowly cleaning up a pod, however it should succeed in a
// couple of minutes. Wait 20 minutes to recover from random cloud hiccups.
// kubelet is slowly cleaning up the previous pod, however it should succeed
// in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups.
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
// checkAWSEBS checks properties of an AWS EBS. Test framework does not
// instantiate full AWS provider, therefore we need use ec2 API directly.
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
diskName := volume.Spec.AWSElasticBlockStore.VolumeID
var client *ec2.EC2
tokens := strings.Split(diskName, "/")
volumeID := tokens[len(tokens)-1]
zone := framework.TestContext.CloudConfig.Zone
if len(zone) > 0 {
region := zone[:len(zone)-1]
cfg := aws.Config{Region: &region}
framework.Logf("using region %s", region)
client = ec2.New(session.New(), &cfg)
} else {
framework.Logf("no region configured")
client = ec2.New(session.New())
}
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
info, err := client.DescribeVolumes(request)
if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
}
if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID)
}
if len(info.Volumes) > 1 {
return fmt.Errorf("multiple volumes found for volume %q", volumeID)
}
awsVolume := info.Volumes[0]
if awsVolume.VolumeType == nil {
return fmt.Errorf("expected volume type %q, got nil", volumeType)
}
if *awsVolume.VolumeType != volumeType {
return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
}
if encrypted && awsVolume.Encrypted == nil {
return fmt.Errorf("expected encrypted volume, got no encryption")
}
if encrypted && !*awsVolume.Encrypted {
return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
}
return nil
}
func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
cloud, err := framework.GetGCECloud()
if err != nil {
return err
}
diskName := volume.Spec.GCEPersistentDisk.PDName
disk, err := cloud.GetDiskByNameUnknownZone(diskName)
if err != nil {
return err
}
if !strings.HasSuffix(disk.Type, volumeType) {
return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
}
return nil
}
var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
f := framework.NewDefaultFramework("volume-provisioning")
@ -108,60 +217,178 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
})
framework.KubeDescribe("DynamicProvisioner", func() {
It("should create and delete persistent volumes [Slow] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
It("should provision storage with different parameters [Slow] [Volume]", func() {
cloudZone := getRandomCloudZone(c)
By("creating a StorageClass")
class := newStorageClass("", "internal")
class, err := c.StorageV1().StorageClasses().Create(class)
defer c.StorageV1().StorageClasses().Delete(class.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("creating a claim with a dynamic provisioning annotation")
claim := newClaim(ns)
claim.Spec.StorageClassName = &class.Name
defer func() {
c.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)
}()
claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
if framework.ProviderIs("vsphere") {
// vsphere provider does not allocate volumes in 1GiB chunks, so setting expected size
// equal to requestedSize
testDynamicProvisioning(c, claim, requestedSize)
} else {
// Expected size of the volume is 2GiB, because the other three supported cloud
// providers allocate volumes in 1GiB chunks.
testDynamicProvisioning(c, claim, "2Gi")
}
})
})
framework.KubeDescribe("DynamicProvisioner Beta", func() {
It("should create and delete persistent volumes [Slow] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
By("creating a StorageClass")
class := newBetaStorageClass("", "beta")
class, err := c.StorageV1beta1().StorageClasses().Create(class)
defer deleteStorageClass(c, class.Name)
Expect(err).NotTo(HaveOccurred())
By("creating a claim with a dynamic provisioning annotation")
claim := newClaim(ns)
claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: class.Name,
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []storageClassTest{
{
"SSD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
"type": "pd-ssd",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
},
{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
"type": "pd-standard",
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
},
// AWS
{
"gp2 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"type": "gp2",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
},
},
{
"io1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
"3.5Gi",
"4Gi", // 4 GiB is minimum for io1
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
},
},
{
"sc1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"type": "sc1",
},
"500Gi", // minimum for sc1
"500Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "sc1", false)
},
},
{
"st1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"type": "st1",
},
"500Gi", // minimum for st1
"500Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
},
{
"encrypted EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"encrypted": "true",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
"generic Cinder volume on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
},
{
"Cinder volume with empty volume type and zone on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{
"type": "",
"availability": "",
},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
},
// vSphere generic test
{
"generic vSphere volume",
[]string{"vsphere"},
"kubernetes.io/vsphere-volume",
map[string]string{},
"1.5Gi",
"1.5Gi",
nil,
},
}
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
var betaTest *storageClassTest
for i, t := range tests {
// Beware of closures: use local variables instead of those from the
// outer scope
test := t
testDynamicProvisioning(c, claim, "2Gi")
if !framework.ProviderIs(test.cloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders)
continue
}
// Remember the last supported test for subsequent test of beta API
betaTest = &test
By("Testing " + test.name)
suffix := fmt.Sprintf("%d", i)
class := newStorageClass(test, ns, suffix)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
testDynamicProvisioning(test, c, claim, class)
}
// Run the last test with storage.k8s.io/v1beta1 and beta annotation on pvc
if betaTest != nil {
By("Testing " + betaTest.name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, testDynamicProvisioning does not accept beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
claim := newClaim(*betaTest, ns, "beta")
claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: class.Name,
}
testDynamicProvisioning(*betaTest, c, claim, nil)
}
})
// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout) when there is
@ -197,17 +424,21 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
}
By("Creating a StorageClass for the unmanaged zone")
sc := newBetaStorageClass("", suffix)
// Set an unmanaged zone.
sc.Parameters = map[string]string{"zone": unmanagedZone}
sc, err = c.StorageV1beta1().StorageClasses().Create(sc)
test := storageClassTest{
name: "unmanaged_zone",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{"zone": unmanagedZone},
claimSize: "1Gi",
}
sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, sc.Name)
By("Creating a claim and expecting it to timeout")
pvc := newClaim(ns)
pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.Core().PersistentVolumeClaims(ns).Create(pvc)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
@ -223,21 +454,27 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
// not being deleted.
// NOTE: Polls until no PVs are detected, times out at 5 minutes.
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume
By("Creating a StorageClass")
class := newBetaStorageClass("kubernetes.io/gce-pd", "")
class, err := c.StorageV1beta1().StorageClasses().Create(class)
By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
test := storageClassTest{
name: "deletion race",
provisioner: "", // Use a native one based on current cloud provider
claimSize: "1Gi",
}
class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
framework.Logf("Created StorageClass %q", class.Name)
By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
claim := newClaim(ns)
// To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ {
suffix := fmt.Sprintf("race-%d", i)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
tmpClaim := framework.CreatePVC(c, ns, claim)
framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)
}
@ -254,7 +491,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
if len(residualPVs) > 0 {
framework.Logf("Remaining PersistentVolumes:")
for i, pv := range residualPVs {
framework.Logf("%s%d) %s", "\t", i+1, pv.Name)
framework.Logf("\t%d) %s", i+1, pv.Name)
}
framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
}
@ -267,20 +504,17 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
By("creating a claim with an alpha dynamic provisioning annotation")
claim := newClaim(ns)
claim.Annotations = map[string]string{v1.AlphaStorageClassAnnotation: ""}
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
if framework.ProviderIs("vsphere") {
testDynamicProvisioning(c, claim, requestedSize)
} else {
testDynamicProvisioning(c, claim, "2Gi")
test := storageClassTest{
name: "alpha test",
claimSize: "2Gi",
expectedSize: "2Gi",
}
claim := newClaim(test, ns, "alpha")
claim.Annotations = map[string]string{
v1.AlphaStorageClassAnnotation: "true",
}
testDynamicProvisioning(test, c, claim, nil)
})
})
@ -301,29 +535,24 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
defer framework.DeletePodOrFail(c, ns, pod.Name)
By("creating a StorageClass")
class := newStorageClass(externalPluginName, "external")
class, err = c.StorageV1().StorageClasses().Create(class)
defer deleteStorageClass(c, class.Name)
Expect(err).NotTo(HaveOccurred())
By("creating a claim with a dynamic provisioning annotation")
claim := newClaim(ns)
test := storageClassTest{
name: "external provisioner test",
provisioner: externalPluginName,
claimSize: "1500Mi",
expectedSize: "1500Mi",
}
class := newStorageClass(test, ns, "external")
className := class.Name
claim := newClaim(test, ns, "external")
// the external provisioner understands Beta only right now, see
// https://github.com/kubernetes-incubator/external-storage/issues/37
// claim.Spec.StorageClassName = &className
claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: className,
}
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
// Expected size of the externally provisioned volume depends on the external
// provisioner: for nfs-provisioner used here, it's equal to requested
testDynamicProvisioning(c, claim, requestedSize)
By("creating a claim with a external provisioning annotation")
testDynamicProvisioning(test, c, claim, class)
})
})
@ -332,24 +561,26 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
By("creating a claim with no annotation")
claim := newClaim(ns)
test := storageClassTest{
name: "default",
claimSize: "2Gi",
expectedSize: "2Gi",
}
claim := newClaim(test, ns, "default")
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
if framework.ProviderIs("vsphere") {
testDynamicProvisioning(c, claim, requestedSize)
} else {
testDynamicProvisioning(c, claim, "2Gi")
}
testDynamicProvisioning(test, c, claim, nil)
})
// Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by changing the default annotation[Slow] [Serial] [Disruptive] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
scName := getDefaultStorageClassName(c)
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true)
@ -357,18 +588,18 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
updateDefaultStorageClass(c, scName, "false")
By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(ns)
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.Core().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
})
@ -377,6 +608,10 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
It("should be disabled by removing the default annotation[Slow] [Serial] [Disruptive] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
scName := getDefaultStorageClassName(c)
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true)
@ -384,18 +619,18 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
updateDefaultStorageClass(c, scName, "")
By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(ns)
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.Core().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
})
@ -454,7 +689,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
verifyDefaultStorageClass(c, scName, expectedDefault)
}
func newClaim(ns string) *v1.PersistentVolumeClaim {
func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
@ -466,7 +701,7 @@ func newClaim(ns string) *v1.PersistentVolumeClaim {
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(requestedSize),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(t.claimSize),
},
},
},
@ -514,7 +749,7 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
},
},
}
pod, err := c.Core().Pods(ns).Create(pod)
pod, err := c.CoreV1().Pods(ns).Create(pod)
defer func() {
framework.DeletePodOrFail(c, ns, pod.Name)
}()
@ -536,28 +771,32 @@ func getDefaultPluginName() string {
return ""
}
func newStorageClass(pluginName, suffix string) *storage.StorageClass {
func newStorageClass(t storageClassTest, ns string, suffix string) *storage.StorageClass {
pluginName := t.provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
if suffix == "" {
suffix = "sc"
}
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: suffix + "-",
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: pluginName,
Parameters: t.parameters,
}
}
// TODO: remove when storage.k8s.io/v1beta1 and beta storage class annotations
// are removed.
func newBetaStorageClass(pluginName, suffix string) *storagebeta.StorageClass {
func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.StorageClass {
pluginName := t.provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
@ -573,11 +812,12 @@ func newBetaStorageClass(pluginName, suffix string) *storagebeta.StorageClass {
GenerateName: suffix + "-",
},
Provisioner: pluginName,
Parameters: t.parameters,
}
}
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
podClient := c.Core().Pods(ns)
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -657,12 +897,12 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
remainingPVs = []*v1.PersistentVolume{}
allPVs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return true, err
}
for _, pv := range allPVs.Items {
if pv.Annotations[v1.BetaStorageClassAnnotation] == scName {
if v1.GetPersistentVolumeClass(&pv) == scName {
remainingPVs = append(remainingPVs, &pv)
}
}
@ -677,7 +917,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(c clientset.Interface, className string) {
err := c.Storage().StorageClasses().Delete(className, nil)
err := c.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
@ -690,3 +930,19 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
framework.DeletePersistentVolume(c, pv.Name)
}
}
func getRandomCloudZone(c clientset.Interface) string {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[metav1.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
// return "" in case that no node has zone label
zone, _ := zones.PopAny()
return zone
}