Add multizone PV tests

Michelle Au 2017-02-28 22:26:41 -08:00
parent 2e677a5f80
commit e6a008dac1
2 changed files with 137 additions and 8 deletions
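
In short, the change threads an optional zone through the e2e framework's PD helpers, adds CreatePVSource/DeletePVSource on top of them, and adds a Multi-AZ test that schedules pods onto statically provisioned zonal PVs. A minimal caller sketch of the new zone-aware entry point, assuming the test/e2e/framework package used by the test file below (the zone literal, logging, and Gomega assertions are illustrative, not part of this commit):

// Create a PD in an explicit zone; an empty zone falls back to
// TestContext.CloudConfig.Zone, i.e. the old CreatePDWithRetry() behavior.
diskName, err := framework.CreatePDWithRetryAndZone("us-central1-b") // hypothetical zone
Expect(err).NotTo(HaveOccurred())
defer func() {
    // Best-effort cleanup of the disk created above.
    if err := framework.DeletePDWithRetry(diskName); err != nil {
        framework.Logf("failed to delete PD %q: %v", diskName, err)
    }
}()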

View File

@@ -88,6 +88,7 @@ type PersistentVolumeClaimConfig struct {
AccessModes []v1.PersistentVolumeAccessMode
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
}
// Clean up a pv and pvc in a single pv/pvc test case.
@@ -625,14 +626,15 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
StorageClassName: cfg.StorageClassName,
},
}
}
func CreatePDWithRetry() (string, error) {
func createPDWithRetry(zone string) (string, error) {
var err error
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
newDiskName, err := createPD()
newDiskName, err := createPD(zone)
if err != nil {
Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
continue
@@ -643,6 +645,14 @@ func CreatePDWithRetry() (string, error) {
return "", err
}
func CreatePDWithRetry() (string, error) {
return createPDWithRetry("")
}
func CreatePDWithRetryAndZone(zone string) (string, error) {
return createPDWithRetry(zone)
}
func DeletePDWithRetry(diskName string) error {
var err error
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
@@ -657,7 +667,11 @@ func DeletePDWithRetry(diskName string) error {
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
}
func createPD() (string, error) {
func createPD(zone string) (string, error) {
if zone == "" {
zone = TestContext.CloudConfig.Zone
}
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
@@ -667,7 +681,7 @@ func createPD() (string, error) {
}
tags := map[string]string{}
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
if err != nil {
return "", err
}
@@ -676,7 +690,7 @@ func createPD() (string, error) {
client := ec2.New(session.New())
request := &ec2.CreateVolumeInput{}
request.AvailabilityZone = aws.String(TestContext.CloudConfig.Zone)
request.AvailabilityZone = aws.String(zone)
request.Size = aws.Int64(10)
request.VolumeType = aws.String(awscloud.DefaultVolumeType)
response, err := client.CreateVolume(request)
@@ -828,3 +842,39 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
}
return persistentvolumes, nil
}
func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
diskName, err := CreatePDWithRetryAndZone(zone)
if err != nil {
return nil, err
}
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return &v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
}, nil
} else if TestContext.Provider == "aws" {
return &v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
FSType: "ext3",
},
}, nil
} else {
return nil, fmt.Errorf("Provider not supported")
}
}
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
} else if TestContext.Provider == "aws" {
return DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
} else {
return fmt.Errorf("Provider not supported")
}
}
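
Together, CreatePVSource and DeletePVSource bracket the life of a statically provisioned disk: the first creates a PD in the requested zone and wraps it in the matching provider-specific volume source (GCE PD or AWS EBS; any other provider returns an error), and the second deletes the underlying disk again. A rough sketch of how a caller is expected to pair them, with an illustrative zone and error handling that is not part of this commit:

// Guard mirroring the provider switch inside the helpers.
framework.SkipUnlessProviderIs("gce", "gke", "aws")

pvSource, err := framework.CreatePVSource("us-central1-b") // hypothetical zone
Expect(err).NotTo(HaveOccurred())
defer func() {
    // Remove the backing disk once the PV/PVC built on top of it are gone.
    Expect(framework.DeletePVSource(pvSource)).NotTo(HaveOccurred())
}()
// pvSource then feeds PersistentVolumeConfig.PVSource, as the multizone test below does.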

View File

@@ -56,6 +56,10 @@ var _ = framework.KubeDescribe("Multi-AZ Clusters", func() {
It("should spread the pods of a replication controller across zones", func() {
SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
})
It("should schedule pods in the same zones as statically provisioned PVs", func() {
PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
})
})
// Check that the pods comprising a service get spread evenly across available zones
@@ -240,3 +244,78 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
}
type StaticPVTestConfig struct {
pvSource *v1.PersistentVolumeSource
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
pod *v1.Pod
}
// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in.
func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) {
// TODO: add GKE after enabling admission plugin in GKE
// TODO: add AWS
framework.SkipUnlessProviderIs("gce")
var err error
c := f.ClientSet
ns := f.Namespace.Name
zones, err := getZoneNames(c)
Expect(err).NotTo(HaveOccurred())
By("Creating static PVs across zones")
configs := make([]*StaticPVTestConfig, podCount)
for i := range configs {
configs[i] = &StaticPVTestConfig{}
}
defer func() {
By("Cleaning up pods and PVs")
for _, config := range configs {
framework.DeletePodOrFail(c, ns, config.pod.Name)
}
for _, config := range configs {
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
Expect(err).NotTo(HaveOccurred())
}
}()
for i, config := range configs {
zone := zones[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
Expect(err).NotTo(HaveOccurred())
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
PVSource: *config.pvSource,
Prebind: nil,
}
className := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for all PVCs to be bound")
for _, config := range configs {
framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
}
By("Creating pods for each static PV")
for _, config := range configs {
podConfig := framework.MakePod(ns, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.Core().Pods(ns).Create(podConfig)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
Expect(err).NotTo(HaveOccurred())
}
}
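
The pointer-valued StorageClassName added to PersistentVolumeClaimConfig is what lets this test opt out of dynamic provisioning: a nil pointer leaves storageClassName unset on the claim (so a configured default class may still be applied), while pointing it at an empty string, as the test does with className, explicitly asks for a PV with no class, so the claim can only bind to one of the pre-created zonal PVs. A minimal illustration of the two cases (variable names are hypothetical):

// Leave the class unset; a cluster default class, if any, may still apply.
defaulted := framework.PersistentVolumeClaimConfig{}

// Pin storageClassName to "" so the claim only binds to statically
// provisioned PVs, exactly as PodsUseStaticPVsOrFail sets it up above.
emptyClass := ""
static := framework.PersistentVolumeClaimConfig{StorageClassName: &emptyClass}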