e2e/storage: improve PV checking

TestDynamicProvisioning had multiple ways of choosing additional
checks:
- the PvCheck callback
- the built-in write/read check, controlled by a boolean
- the snapshot testing

Complicating matters further, that built-in write/read check had recently
been made more customizable via the new fields `NodeSelector` and
`ExpectUnschedulable`, which were only set by one particular test (see
https://github.com/kubernetes/kubernetes/pull/70941).

That is confusing and will only get more confusing when adding more
checks in the future. Therefore the write/read check is now a separate
function that must be enabled explicitly by tests that want to run it.
The snapshot checking is also defined only for the snapshot test.
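
To illustrate the new opt-in model (a sketch using the helpers added in
this commit; cs, claim and class stand in for the usual test fixtures):

    test := testsuites.StorageClassTest{
        ClaimSize:    "1Gi",
        ExpectedSize: "1Gi",
        // Opt in to the write/read check by installing it as PvCheck;
        // tests that leave PvCheck nil simply skip it.
        PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
            testsuites.PVWriteReadCheck(cs, claim, volume, testsuites.NodeSelection{})
        },
    }
    testsuites.TestDynamicProvisioning(test, cs, claim, class)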

The test that expects unschedulable pods now checks for that particular
situation itself. Instead of testing with two pods that both fail to
start (the behavior inherited from the write/read check), only a single
unschedulable pod is created.
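
Concretely, the negative topology test now installs a check along these
lines (condensed from the diff below):

    test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
        // A single pod with an unsatisfiable node selector; the check
        // passes once the scheduler reports the pod as unschedulable.
        pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name,
            "pvc-tester-unschedulable", "sleep 100000",
            testsuites.NodeSelection{Selector: nodeSelector})
        defer testsuites.StopPod(cs, pod)
        framework.ExpectNoError(
            framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace),
            "pod should be unschedulable")
    }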

Because node name, node selector and `ExpectUnschedulable` were only
used for checking, `StorageClassTest` can be simplified by removing all
of these fields.
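
Node placement now travels as a small NodeSelection struct that is passed
to the pod helpers instead (a sketch; nodeName and podZone stand in for
test-specific values):

    node := testsuites.NodeSelection{
        // Any combination of these may be set; an empty NodeSelection
        // leaves scheduling entirely to Kubernetes.
        Name:     nodeName,
        Selector: map[string]string{v1.LabelZoneFailureDomain: podZone},
    }
    testsuites.RunInPodWithVolume(cs, ns, claim.Name, "pvc-volume-tester",
        "grep 'hello world' /mnt/test/data", node)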

Expect(err).NotTo(HaveOccurred()) is an anti-pattern in Ginkgo testing
because the resulting failure message doesn't explain what failed (see
https://github.com/kubernetes/kubernetes/issues/34059). We now avoid it
by making each check function responsible for verifying its own errors
and for including context in its failure messages.
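
Compare (both patterns appear in the diff below):

    // Anti-pattern: on failure Ginkgo reports only
    // "Expected an error not to have occurred".
    Expect(err).NotTo(HaveOccurred())

    // Better: the failure message names the operation that failed.
    err = checkGCEPD(volume, "pd-standard")
    Expect(err).NotTo(HaveOccurred(), "checkGCEPD")

    // Or use the framework helper, which also formats the error:
    framework.ExpectNoError(err, "Failed to create pod: %v", err)
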

Patrick Ohly, 2019-01-16 12:26:38 +01:00
parent 5b8826b610
commit 54d8f1648f
4 changed files with 195 additions and 138 deletions

===== changed file 1 of 4 =====

@@ -261,9 +261,11 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 				Parameters:   sc.Parameters,
 				ClaimSize:    "1Gi",
 				ExpectedSize: "1Gi",
-				NodeName:     nodeName,
 			}
-			class, claim, pod := startPausePod(cs, scTest, ns.Name)
+			nodeSelection := testsuites.NodeSelection{
+				Name: nodeName,
+			}
+			class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
 			if class != nil {
 				defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
 			}
@@ -381,16 +383,16 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 				Parameters:   sc.Parameters,
 				ClaimSize:    "1Gi",
 				ExpectedSize: "1Gi",
-				// The mock driver only works when everything runs on a single node.
-				NodeName: nodeName,
 				// Provisioner and storage class name must match what's used in
 				// csi-storageclass.yaml, plus the test-specific suffix.
 				Provisioner:      sc.Provisioner,
 				StorageClassName: "csi-mock-sc-" + f.UniqueName,
-				// Mock driver does not provide any persistency.
-				SkipWriteReadCheck: true,
 			}
-			class, claim, pod := startPausePod(cs, scTest, ns.Name)
+			nodeSelection := testsuites.NodeSelection{
+				// The mock driver only works when everything runs on a single node.
+				Name: nodeName,
+			}
+			class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
 			if class != nil {
 				defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
 			}
@@ -429,7 +431,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
 	claim.Spec.StorageClassName = &class.Name
 	if delayBinding {
-		_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
+		_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
 		Expect(node).ToNot(BeNil(), "Unexpected nil node found")
 	} else {
 		testsuites.TestDynamicProvisioning(test, cs, claim, class)
 	}
@@ -450,16 +452,22 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
 	test := createGCEPDStorageClassTest()
 	test.DelayBinding = delayBinding
-	test.NodeSelector = map[string]string{v1.LabelZoneFailureDomain: podZone}
-	test.ExpectUnschedulable = true
+	nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
 
 	class := newStorageClass(test, namespace, suffix)
 	addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
 	claim := newClaim(test, namespace, suffix)
 	claim.Spec.StorageClassName = &class.Name
 	if delayBinding {
-		testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
+		testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
 	} else {
+		test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			// Ensure that a pod cannot be scheduled in an unsuitable zone.
+			pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
+				testsuites.NodeSelection{Selector: nodeSelector})
+			defer testsuites.StopPod(cs, pod)
+			framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
+		}
 		testsuites.TestDynamicProvisioning(test, cs, claim, class)
 	}
 }
@@ -500,7 +508,7 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st
 	return pv.Spec.CSI.VolumeHandle
 }
 
-func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
 	class := newStorageClass(t, ns, "")
 	class, err := cs.StorageV1().StorageClasses().Create(class)
 	framework.ExpectNoError(err, "Failed to create class : %v", err)
@@ -514,6 +522,9 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str
 			GenerateName: "pvc-volume-tester-",
 		},
 		Spec: v1.PodSpec{
+			NodeName:     node.Name,
+			NodeSelector: node.Selector,
+			Affinity:     node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "volume-tester",
@@ -541,9 +552,6 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str
 		},
 	}
 
-	if len(t.NodeName) != 0 {
-		pod.Spec.NodeName = t.NodeName
-	}
 	pod, err = cs.CoreV1().Pods(ns).Create(pod)
 	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	return class, claim, pod

===== changed file 2 of 4 =====

@@ -108,12 +108,14 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 			},
 			ClaimSize:    "1.5Gi",
 			ExpectedSize: "2Gi",
-			PvCheck: func(volume *v1.PersistentVolume) error {
-				err := checkGCEPD(volume, "pd-standard")
-				if err != nil {
-					return err
-				}
-				return verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
+			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+				var err error
+				err = checkGCEPD(volume, "pd-standard")
+				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
+				err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
+				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
+				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -126,16 +128,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 			},
 			ClaimSize:    "1.5Gi",
 			ExpectedSize: "2Gi",
-			PvCheck: func(volume *v1.PersistentVolume) error {
-				err := checkGCEPD(volume, "pd-standard")
-				if err != nil {
-					return err
-				}
+			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+				var err error
+				err = checkGCEPD(volume, "pd-standard")
+				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
 				zones, err := framework.GetClusterZones(c)
-				if err != nil {
-					return err
-				}
-				return verifyZonesInPV(volume, zones, false /* match */)
+				Expect(err).NotTo(HaveOccurred(), "GetClusterZones")
+				err = verifyZonesInPV(volume, zones, false /* match */)
+				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
+				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 	}
@@ -317,7 +319,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 		claim.Spec.StorageClassName = &class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
+	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
@@ -374,7 +376,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 		claim.Spec.StorageClassName = &class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
+	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}

===== changed file 3 of 4 =====

@@ -37,22 +37,19 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
-// StorageClassTest represents parameters to be used by provisioning tests
+// StorageClassTest represents parameters to be used by provisioning tests.
+// Not all parameters are used by all tests.
 type StorageClassTest struct {
 	Name             string
 	CloudProviders   []string
 	Provisioner      string
 	StorageClassName string
 	Parameters       map[string]string
 	DelayBinding     bool
 	ClaimSize        string
 	ExpectedSize     string
-	PvCheck             func(volume *v1.PersistentVolume) error
-	NodeName            string
-	SkipWriteReadCheck  bool
-	VolumeMode          *v1.PersistentVolumeMode
-	NodeSelector        map[string]string // NodeSelector for the pod
-	ExpectUnschedulable bool              // Whether the test pod is expected to be unschedulable
+	PvCheck          func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
+	VolumeMode       *v1.PersistentVolumeMode
 }
 
 type provisioningTestSuite struct {
@@ -89,14 +86,14 @@ func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPat
 		testCase: StorageClassTest{
 			ClaimSize:    resource.claimSize,
 			ExpectedSize: resource.claimSize,
-			NodeName:     driver.GetDriverInfo().Config.ClientNodeName,
 		},
 		cs:    driver.GetDriverInfo().Config.Framework.ClientSet,
 		dc:    driver.GetDriverInfo().Config.Framework.DynamicClient,
 		pvc:   resource.pvc,
 		sc:    resource.sc,
 		vsc:   resource.vsc,
 		dInfo: driver.GetDriverInfo(),
+		nodeName: driver.GetDriverInfo().Config.ClientNodeName,
 	}
 
 	return resource, input
@@ -179,10 +176,17 @@ type provisioningTestInput struct {
 	sc       *storage.StorageClass
 	vsc      *unstructured.Unstructured
 	dInfo    *DriverInfo
+	nodeName string
 }
 
 func testProvisioning(input *provisioningTestInput) {
+	// common checker for most of the test cases below
+	pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		PVWriteReadCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+	}
+
 	It("should provision storage with defaults", func() {
+		input.testCase.PvCheck = pvcheck
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
@@ -192,6 +196,7 @@ func testProvisioning(input *provisioningTestInput) {
 		}
 		input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
+		input.testCase.PvCheck = pvcheck
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
@@ -201,7 +206,6 @@ func testProvisioning(input *provisioningTestInput) {
 		}
 		block := v1.PersistentVolumeBlock
 		input.testCase.VolumeMode = &block
-		input.testCase.SkipWriteReadCheck = true
 		input.pvc.Spec.VolumeMode = &block
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
@@ -211,11 +215,15 @@ func testProvisioning(input *provisioningTestInput) {
 			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
 		}
 
-		input.testCase.SkipWriteReadCheck = true
-		dataSource, cleanupFunc := prepareDataSourceForProvisioning(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
+		dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
 		defer cleanupFunc()
 
 		input.pvc.Spec.DataSource = dataSource
+		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			By("checking whether the created volume has the pre-populated data")
+			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
+			RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
+		}
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
 }
@@ -288,35 +296,7 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
 	// Run the checker
 	if t.PvCheck != nil {
-		err = t.PvCheck(pv)
-		Expect(err).NotTo(HaveOccurred())
-	}
-
-	if claim.Spec.DataSource != nil {
-		By("checking the created volume whether has the pre-populated data")
-		command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-		runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-snapshot-reader", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
-	}
-
-	if !t.SkipWriteReadCheck {
-		// We start two pods:
-		// - The first writes 'hello word' to the /mnt/test (= the volume).
-		// - The second one runs grep 'hello world' on /mnt/test.
-		// If both succeed, Kubernetes actually allocated something that is
-		// persistent across pods.
-		By("checking the created volume is writable and has the PV's mount options")
-		command := "echo 'hello world' > /mnt/test/data"
-		// We give the first pod the secondary responsibility of checking the volume has
-		// been mounted with the PV's mount options, if the PV was provisioned with any
-		for _, option := range pv.Spec.MountOptions {
-			// Get entry, get mount options at 6th word, replace brackets with commas
-			command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
-		}
-		command += " || (mount | grep 'on /mnt/test'; false)"
-		runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
-
-		By("checking the created volume is readable and retains data")
-		runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable)
+		t.PvCheck(claim, pv)
 	}
 
 	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
@@ -337,15 +317,41 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
 	return pv
 }
 
-func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
-	pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class)
+// PVWriteReadCheck checks that a PV retains data.
+//
+// It starts two pods:
+// - The first writes 'hello word' to the /mnt/test (= the volume).
+// - The second one runs grep 'hello world' on /mnt/test.
+// If both succeed, Kubernetes actually allocated something that is
+// persistent across pods.
+//
+// This is a common test that can be called from a StorageClassTest.PvCheck.
+func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	command := "echo 'hello world' > /mnt/test/data"
+	// We give the first pod the secondary responsibility of checking the volume has
+	// been mounted with the PV's mount options, if the PV was provisioned with any
+	for _, option := range volume.Spec.MountOptions {
+		// Get entry, get mount options at 6th word, replace brackets with commas
+		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
+	}
+	command += " || (mount | grep 'on /mnt/test'; false)"
+	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
+
+	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %+v", node))
+	command = "grep 'hello world' /mnt/test/data"
+	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, node)
+}
+
+func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
+	pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable)
 	if pvs == nil {
 		return nil, node
 	}
 	return pvs[0], node
 }
 
-func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) {
+func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
 	var err error
 	Expect(len(claims)).ToNot(Equal(0))
 	namespace := claims[0].Namespace
@@ -388,8 +394,8 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
 	By("creating a pod referring to the claims")
 	// Create a pod referring to the claim and wait for it to get to running
 	var pod *v1.Pod
-	if t.ExpectUnschedulable {
-		pod, err = framework.CreateUnschedulablePod(client, namespace, t.NodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
+	if expectUnschedulable {
+		pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
 	} else {
 		pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
 	}
@@ -398,7 +404,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
 		framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
 		framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
 	}()
-	if t.ExpectUnschedulable {
+	if expectUnschedulable {
 		// Verify that no claims are provisioned.
 		verifyPVCsPending(client, createdClaims)
 		return nil, nil
@@ -426,8 +432,25 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
 	return pvs, node
 }
 
-// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
-func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) {
+// NodeSelection specifies where to run a pod, using a combination of fixed node name,
+// node selector and/or affinity.
+type NodeSelection struct {
+	Name     string
+	Selector map[string]string
+	Affinity *v1.Affinity
+}
+
+// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
+// It starts, checks, collects output and stops it.
+func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) {
+	pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
+	defer StopPod(c, pod)
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
+}
+
+// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
+// The caller is responsible for checking the pod and deleting it.
+func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) *v1.Pod {
 	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "Pod",
@@ -437,7 +460,9 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName,
 			GenerateName: podName + "-",
 		},
 		Spec: v1.PodSpec{
-			NodeName: nodeName,
+			NodeName:     node.Name,
+			NodeSelector: node.Selector,
+			Affinity:     node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "volume-tester",
@@ -464,27 +489,26 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName,
 					},
 				},
 			},
-			NodeSelector: nodeSelector,
 		},
 	}
 
 	pod, err := c.CoreV1().Pods(ns).Create(pod)
 	framework.ExpectNoError(err, "Failed to create pod: %v", err)
-	defer func() {
-		body, err := c.CoreV1().Pods(ns).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
-		if err != nil {
-			framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
-		} else {
-			framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
-		}
-		framework.DeletePodOrFail(c, ns, pod.Name)
-	}()
+	return pod
+}
 
-	if unschedulable {
-		framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(c, pod.Name, pod.Namespace))
-	} else {
-		framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
-	}
+// StopPod first tries to log the output of the pod's container, then deletes the pod.
+func StopPod(c clientset.Interface, pod *v1.Pod) {
+	if pod == nil {
+		return
+	}
+	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
+	if err != nil {
+		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+	} else {
+		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
+	}
+	framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
 }
 
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
@@ -497,7 +521,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
 }
 
 func prepareDataSourceForProvisioning(
-	t StorageClassTest,
+	node NodeSelection,
 	client clientset.Interface,
 	dynamicClient dynamic.Interface,
 	initClaim *v1.PersistentVolumeClaim,
@@ -525,7 +549,7 @@ func prepareDataSourceForProvisioning(
 	// write namespace to the /mnt/test (= the volume).
 	By("[Initialize dataSource]write data to volume")
 	command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace())
-	runInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
+	RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node)
 
 	By("[Initialize dataSource]creating a SnapshotClass")
 	snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})

===== changed file 4 of 4 =====

@@ -226,7 +226,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
 		claim.Spec.StorageClassName = &class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
+	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
@@ -273,8 +273,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "2Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkGCEPD(volume, "pd-ssd")
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkGCEPD(volume, "pd-ssd")
+					Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			{
@@ -286,8 +288,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "2Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkGCEPD(volume, "pd-standard")
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkGCEPD(volume, "pd-standard")
+					Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			// AWS
@@ -301,8 +305,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "2Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkAWSEBS(volume, "gp2", false)
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkAWSEBS(volume, "gp2", false)
+					Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			{
@@ -315,8 +321,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "3.5Gi",
 				ExpectedSize: "4Gi", // 4 GiB is minimum for io1
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkAWSEBS(volume, "io1", false)
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkAWSEBS(volume, "io1", false)
+					Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			{
@@ -328,8 +336,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "500Gi", // minimum for sc1
 				ExpectedSize: "500Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkAWSEBS(volume, "sc1", false)
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkAWSEBS(volume, "sc1", false)
+					Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			{
@@ -341,8 +351,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "500Gi", // minimum for st1
 				ExpectedSize: "500Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkAWSEBS(volume, "st1", false)
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkAWSEBS(volume, "st1", false)
+					Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			{
@@ -354,8 +366,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "1Gi",
 				ExpectedSize: "1Gi",
-				PvCheck: func(volume *v1.PersistentVolume) error {
-					return checkAWSEBS(volume, "gp2", true)
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					err := checkAWSEBS(volume, "gp2", true)
+					Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted")
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			},
 			// OpenStack generic tests (works on all OpenStack deployments)
@@ -366,7 +380,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				Parameters:   map[string]string{},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "2Gi",
-				PvCheck: nil, // there is currently nothing to check on OpenStack
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				},
 			},
 			{
 				Name: "Cinder volume with empty volume type and zone on OpenStack",
@@ -378,7 +394,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "2Gi",
-				PvCheck: nil, // there is currently nothing to check on OpenStack
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				},
 			},
 			// vSphere generic test
 			{
@@ -388,7 +406,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				Parameters:   map[string]string{},
 				ClaimSize:    "1.5Gi",
 				ExpectedSize: "1.5Gi",
-				PvCheck: nil,
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				},
 			},
 			// Azure
 			{
@@ -398,7 +418,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				Parameters:   map[string]string{},
 				ClaimSize:    "1Gi",
 				ExpectedSize: "1Gi",
-				PvCheck: nil,
+				PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+					testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				},
 			},
 		}
@@ -451,8 +473,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			},
 			ClaimSize:    "1Gi",
 			ExpectedSize: "1Gi",
-			PvCheck: func(volume *v1.PersistentVolume) error {
-				return checkGCEPD(volume, "pd-standard")
+			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+				err := checkGCEPD(volume, "pd-standard")
+				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
+				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		}
 		class := newStorageClass(test, ns, "reclaimpolicy")
@@ -793,12 +817,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		serverUrl := "https://" + pod.Status.PodIP + ":8081"
 		By("creating a StorageClass")
 		test := testsuites.StorageClassTest{
 			Name:         "Gluster Dynamic provisioner test",
 			Provisioner:  "kubernetes.io/glusterfs",
 			ClaimSize:    "2Gi",
 			ExpectedSize: "2Gi",
 			Parameters:   map[string]string{"resturl": serverUrl},
-			SkipWriteReadCheck: true,
 		}
 		suffix := fmt.Sprintf("glusterdptest")
 		class := newStorageClass(test, ns, suffix)