Review fixes

This commit is contained in:
Jan Safranek 2017-04-07 13:17:47 +02:00
parent a327302200
commit c8bc39dd1a

View File

@ -61,33 +61,34 @@ const (
) )
func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) { func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) {
var err error
if class != nil { if class != nil {
By("creating a StorageClass " + class.Name) By("creating a StorageClass " + class.Name)
class, err := client.Storage().StorageClasses().Create(class) class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
framework.Logf("deleting storage class %s", class.Name) framework.Logf("deleting storage class %s", class.Name)
client.Storage().StorageClasses().Delete(class.Name, nil) client.StorageV1().StorageClasses().Delete(class.Name, nil)
}() }()
Expect(err).NotTo(HaveOccurred())
} }
By("creating a claim") By("creating a claim")
claim, err := client.Core().PersistentVolumeClaims(claim.Namespace).Create(claim) claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
framework.Logf("deleting claim %s/%s", claim.Namespace, claim.Name) framework.Logf("deleting claim %s/%s", claim.Namespace, claim.Name)
client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
}() }()
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking the claim") By("checking the claim")
// Get new copy of the claim // Get new copy of the claim
claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Get the bound PV // Get the bound PV
pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Check sizes // Check sizes
@ -125,7 +126,7 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data") runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
By("deleting the claim") By("deleting the claim")
framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted. Technically, the first few delete // Wait for the PV to get deleted. Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because // attempts may fail, as the volume is still attached to a node because
@ -216,182 +217,179 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
}) })
framework.KubeDescribe("DynamicProvisioner", func() { framework.KubeDescribe("DynamicProvisioner", func() {
// This test checks that dynamic provisioning can provision a volume It("should provision storage with different parameters [Slow] [Volume]", func() {
// that can be used to persist data among pods. cloudZone := getRandomCloudZone(c)
tests := []storageClassTest{ // This test checks that dynamic provisioning can provision a volume
{ // that can be used to persist data among pods.
"should provision SSD PD on GCE/GKE", tests := []storageClassTest{
[]string{"gce", "gke"}, {
"kubernetes.io/gce-pd", "SSD PD on GCE/GKE",
map[string]string{ []string{"gce", "gke"},
"type": "pd-ssd", "kubernetes.io/gce-pd",
// Check that GCE can parse "zone" parameter, however map[string]string{
// we can't create PDs in different than default zone "type": "pd-ssd",
// as we don't know if we're running with Multizone=true "zone": cloudZone,
"zone": framework.TestContext.CloudConfig.Zone, },
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
}, },
"1.5Gi", {
"2Gi", "HDD PD on GCE/GKE",
func(volume *v1.PersistentVolume) error { []string{"gce", "gke"},
return checkGCEPD(volume, "pd-ssd") "kubernetes.io/gce-pd",
map[string]string{
"type": "pd-standard",
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}, },
}, // AWS
{ {
"should provision HDD PD on GCE/GKE", "gp2 EBS on AWS",
[]string{"gce", "gke"}, []string{"aws"},
"kubernetes.io/gce-pd", "kubernetes.io/aws-ebs",
map[string]string{ map[string]string{
"type": "pd-standard", "type": "gp2",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
},
}, },
"1.5Gi", {
"2Gi", "io1 EBS on AWS",
func(volume *v1.PersistentVolume) error { []string{"aws"},
return checkGCEPD(volume, "pd-standard") "kubernetes.io/aws-ebs",
map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
"3.5Gi",
"4Gi", // 4 GiB is minimum for io1
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
},
}, },
}, {
// AWS "sc1 EBS on AWS",
{ []string{"aws"},
"should provision gp2 EBS on AWS", "kubernetes.io/aws-ebs",
[]string{"aws"}, map[string]string{
"kubernetes.io/aws-ebs", "type": "sc1",
map[string]string{ },
"type": "gp2", "500Gi", // minimum for sc1
// Check that AWS can parse "zone" parameter, however "500Gi",
// we can't create PDs in different than default zone func(volume *v1.PersistentVolume) error {
// as we don't know zone names return checkAWSEBS(volume, "sc1", false)
"zone": framework.TestContext.CloudConfig.Zone, },
}, },
"1.5Gi", {
"2Gi", "st1 EBS on AWS",
func(volume *v1.PersistentVolume) error { []string{"aws"},
return checkAWSEBS(volume, "gp2", false) "kubernetes.io/aws-ebs",
map[string]string{
"type": "st1",
},
"500Gi", // minimum for st1
"500Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
}, },
}, {
{ "encrypted EBS on AWS",
"should provision io1 EBS on AWS", []string{"aws"},
[]string{"aws"}, "kubernetes.io/aws-ebs",
"kubernetes.io/aws-ebs", map[string]string{
map[string]string{ "encrypted": "true",
"type": "io1", },
"iopsPerGB": "50", "1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
}, },
"3.5Gi", // OpenStack generic tests (works on all OpenStack deployments)
"4Gi", // 4 GiB is minimum for io1 {
func(volume *v1.PersistentVolume) error { "generic Cinder volume on OpenStack",
return checkAWSEBS(volume, "io1", false) []string{"openstack"},
"kubernetes.io/cinder",
map[string]string{},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
}, },
}, {
{ "Cinder volume with empty volume type and zone on OpenStack",
"should provision sc1 EBS on AWS", []string{"openstack"},
[]string{"aws"}, "kubernetes.io/cinder",
"kubernetes.io/aws-ebs", map[string]string{
map[string]string{ "type": "",
"type": "sc1", "availability": "",
},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
}, },
"500Gi", // minimum for sc1 // vSphere generic test
"500Gi", {
func(volume *v1.PersistentVolume) error { "generic vSphere volume",
return checkAWSEBS(volume, "sc1", false) []string{"vsphere"},
"kubernetes.io/vsphere-volume",
map[string]string{},
"1.5Gi",
"1.5Gi",
nil,
}, },
}, }
{
"should provision st1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"type": "st1",
},
"500Gi", // minimum for st1
"500Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
},
{
"should provision encrypted EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
"encrypted": "true",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
"should provision generic Cinder volume on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
},
{
"should provision Cinder volume with empty volume type and zone on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{
"type": "",
"availability": "",
},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
},
// vSphere generic test
{
"should provision generic vSphere",
[]string{"vsphere"},
"kubernetes.io/vsphere-volume",
map[string]string{},
"1.5Gi",
"1.5Gi",
nil,
},
}
var betaTest *storageClassTest var betaTest *storageClassTest
for i, t := range tests { for i, t := range tests {
// Beware of closure, use local variables instead of those from // Beware of closure, use local variables instead of those from
// outer scope // outer scope
test := t test := t
suffix := fmt.Sprintf("%d", i)
It(test.name+" [Slow] [Volume]", func() { if !framework.ProviderIs(test.cloudProviders...) {
if len(t.cloudProviders) > 0 { framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders)
framework.SkipUnlessProviderIs(test.cloudProviders...) continue
} }
// Remember the last supported test for subsequent test of beta API // Remember the last supported test for subsequent test of beta API
betaTest = &test betaTest = &test
By("Testing " + test.name)
suffix := fmt.Sprintf("%d", i)
class := newStorageClass(test, ns, suffix) class := newStorageClass(test, ns, suffix)
claim := newClaim(test, ns, suffix) claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name claim.Spec.StorageClassName = &class.Name
testDynamicProvisioning(test, c, claim, class) testDynamicProvisioning(test, c, claim, class)
}) }
}
// Run the last test with storage.k8s.io/v1beta1 and beta annotation on pvc // Run the last test with storage.k8s.io/v1beta1 and beta annotation on pvc
if betaTest != nil { if betaTest != nil {
It("Beta "+betaTest.name+" [Slow] [Volume]", func() { By("Testing " + betaTest.name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta") class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, testDynamicProvisioning does not accept beta class // we need to create the class manually, testDynamicProvisioning does not accept beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class) class, err := c.StorageV1beta1().StorageClasses().Create(class)
defer deleteStorageClass(c, class.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
claim := newClaim(*betaTest, ns, "beta") claim := newClaim(*betaTest, ns, "beta")
claim.Annotations = map[string]string{ claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: class.Name, v1.BetaStorageClassAnnotation: class.Name,
} }
testDynamicProvisioning(*betaTest, c, claim, nil) testDynamicProvisioning(*betaTest, c, claim, nil)
}) }
} })
// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout) when there is // NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout) when there is
// no regression. // no regression.
@ -440,7 +438,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
By("Creating a claim and expecting it to timeout") By("Creating a claim and expecting it to timeout")
pvc := newClaim(test, ns, suffix) pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.Core().PersistentVolumeClaims(ns).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(c, pvc.Name, ns) defer framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
@ -468,7 +466,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
} }
class := newStorageClass(test, ns, "race") class := newStorageClass(test, ns, "race")
class, err := c.Storage().StorageClasses().Create(class) class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name) defer deleteStorageClass(c, class.Name)
@ -591,17 +589,17 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
By("creating a claim with default storageclass and expecting it to timeout") By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns) framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}() }()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.Core().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}) })
@ -622,17 +620,17 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
By("creating a claim with default storageclass and expecting it to timeout") By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
framework.DeletePersistentVolumeClaim(c, claim.Name, ns) framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
}() }()
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.Core().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}) })
@ -751,7 +749,7 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
}, },
}, },
} }
pod, err := c.Core().Pods(ns).Create(pod) pod, err := c.CoreV1().Pods(ns).Create(pod)
defer func() { defer func() {
framework.DeletePodOrFail(c, ns, pod.Name) framework.DeletePodOrFail(c, ns, pod.Name)
}() }()
@ -819,7 +817,7 @@ func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.Storage
} }
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod { func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
podClient := c.Core().Pods(ns) podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{ provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
@ -899,7 +897,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
remainingPVs = []*v1.PersistentVolume{} remainingPVs = []*v1.PersistentVolume{}
allPVs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{}) allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil { if err != nil {
return true, err return true, err
} }
@ -919,7 +917,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(c clientset.Interface, className string) { func deleteStorageClass(c clientset.Interface, className string) {
err := c.Storage().StorageClasses().Delete(className, nil) err := c.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
@ -932,3 +930,19 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
framework.DeletePersistentVolume(c, pv.Name) framework.DeletePersistentVolume(c, pv.Name)
} }
} }
// getRandomCloudZone returns the failure-domain zone label of an arbitrary
// node in the cluster, or "" when no node carries the zone label.
func getRandomCloudZone(c clientset.Interface) string {
	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	// Gather the distinct zone labels present across all nodes.
	zoneNames := sets.NewString()
	for i := range nodeList.Items {
		if z, ok := nodeList.Items[i].Labels[metav1.LabelZoneFailureDomain]; ok {
			zoneNames.Insert(z)
		}
	}
	// PopAny yields ("", false) on an empty set, so an unlabeled cluster
	// results in an empty zone string.
	name, _ := zoneNames.PopAny()
	return name
}