Merge pull request #42019 from msau42/pv_upgrade_test

Automatic merge from submit-queue (batch tested with PRs 41962, 42055, 42062, 42019, 42054)

PV upgrade test

**What this PR does / why we need it**:
This PR adds a PV upgrade test to the new upgrade test framework.  Before, this test had to be done manually.  Currently the upgrade test framework only works on the GCE environment, so I plan to add support for other providers later.  In order to write the test, I had to modify and refactor some volume test util libraries.  I reran the impacted tests to make sure they still passed.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:
It's probably easier to review the two commits separately.  I split it up into the refactor changes, and the upgrade test changes.

**Release note**:

NONE


cc @saad-ali @krousey
This commit is contained in:
Kubernetes Submit Queue 2017-02-27 00:16:58 -08:00 committed by GitHub
commit 37acb8423c
10 changed files with 244 additions and 132 deletions

View File

@ -160,7 +160,6 @@ go_library(
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/aws/awserr",
"//vendor:github.com/aws/aws-sdk-go/aws/session",
"//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
"//vendor:github.com/aws/aws-sdk-go/service/ec2",

View File

@ -36,6 +36,7 @@ var upgradeTests = []upgrades.Test{
&upgrades.DeploymentUpgradeTest{},
&upgrades.ConfigMapUpgradeTest{},
&upgrades.HPAUpgradeTest{},
&upgrades.PersistentVolumeUpgradeTest{},
}
var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {

View File

@ -54,6 +54,7 @@ go_library(
"//pkg/client/conditions:go_default_library",
"//pkg/client/unversioned/remotecommand:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
@ -79,6 +80,10 @@ go_library(
"//test/e2e/generated:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/aws/awserr",
"//vendor:github.com/aws/aws-sdk-go/aws/session",
"//vendor:github.com/aws/aws-sdk-go/service/ec2",
"//vendor:github.com/golang/glog",
"//vendor:github.com/google/cadvisor/info/v1",
"//vendor:github.com/onsi/ginkgo",

View File

@ -18,20 +18,34 @@ package framework
import (
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"google.golang.org/api/googleapi"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
PDRetryTimeout = 5 * time.Minute
PDRetryPollTime = 5 * time.Second
)
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and if it is present. We must always Get the pv object before
@ -506,17 +520,16 @@ func MakePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvcName string) *v1.Pod {
return MakePod(ns, pvcName, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
return MakePod(ns, pvcName, true, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = []string{"while true; do sleep 1; done"}
command = "while true; do sleep 1; done"
}
var isPrivileged bool = true
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -531,8 +544,8 @@ func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c"},
Args: command,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: pvcName,
@ -561,7 +574,7 @@ func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := MakePod(ns, pvc.Name)
clientPod := MakePod(ns, pvc.Name, true, "")
clientPod, err := c.Core().Pods(ns).Create(clientPod)
Expect(err).NotTo(HaveOccurred())
@ -572,3 +585,106 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
Expect(apierrs.IsNotFound(err)).To(BeFalse())
return clientPod
}
// CreatePDWithRetry attempts to provision a new persistent disk, retrying
// every PDRetryPollTime until PDRetryTimeout elapses. It returns the name of
// the created disk, or the last creation error if no attempt succeeded.
func CreatePDWithRetry() (string, error) {
	var (
		diskName string
		lastErr  error
	)
	for begin := time.Now(); time.Since(begin) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
		diskName, lastErr = createPD()
		if lastErr != nil {
			Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", lastErr)
			continue
		}
		Logf("Successfully created a new PD: %q.", diskName)
		break
	}
	return diskName, lastErr
}
// DeletePDWithRetry attempts to delete the persistent disk diskName, retrying
// every PDRetryPollTime until PDRetryTimeout elapses. It fails the test via
// ExpectNoError if the final attempt still returned an error.
func DeletePDWithRetry(diskName string) {
	var lastErr error
	for begin := time.Now(); time.Since(begin) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
		lastErr = deletePD(diskName)
		if lastErr != nil {
			Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, lastErr)
			continue
		}
		Logf("Successfully deleted PD %q.", diskName)
		break
	}
	ExpectNoError(lastErr, "Error deleting PD")
}
// createPD provisions a new persistent disk on the provider configured in
// TestContext and returns its name. On GCE/GKE this is the raw disk name;
// on AWS it is an "aws://<zone>/<volume-id>" URL. Only gce, gke, and aws
// are supported; any other provider yields an error naming that provider.
func createPD() (string, error) {
	switch TestContext.Provider {
	case "gce", "gke":
		pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))

		gceCloud, err := GetGCECloud()
		if err != nil {
			return "", err
		}

		tags := map[string]string{}
		err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
		if err != nil {
			return "", err
		}
		return pdName, nil
	case "aws":
		client := ec2.New(session.New())

		request := &ec2.CreateVolumeInput{}
		request.AvailabilityZone = aws.String(TestContext.CloudConfig.Zone)
		request.Size = aws.Int64(10)
		request.VolumeType = aws.String(awscloud.DefaultVolumeType)
		response, err := client.CreateVolume(request)
		if err != nil {
			return "", err
		}

		az := aws.StringValue(response.AvailabilityZone)
		awsID := aws.StringValue(response.VolumeId)

		// Encode zone and volume ID in the aws:// URL form used elsewhere
		// (see deletePD, which strips the last path segment back out).
		volumeName := "aws://" + az + "/" + awsID
		return volumeName, nil
	default:
		// Name the provider so a misconfigured test run is actionable.
		return "", fmt.Errorf("provider %q does not support volume creation", TestContext.Provider)
	}
}
// deletePD deletes the persistent disk pdName on the provider configured in
// TestContext. For AWS, pdName is the "aws://<zone>/<volume-id>" URL produced
// by createPD. A disk that no longer exists is treated as a successful delete.
func deletePD(pdName string) error {
	switch TestContext.Provider {
	case "gce", "gke":
		gceCloud, err := GetGCECloud()
		if err != nil {
			return err
		}

		err = gceCloud.DeleteDisk(pdName)
		if err != nil {
			if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
				// PD was already deleted (or never existed); deletion
				// implicitly succeeded, so ignore the error.
				return nil
			}
			Logf("Error deleting PD %q: %v", pdName, err)
		}
		return err
	case "aws":
		client := ec2.New(session.New())

		// The EBS volume ID is the last path segment of the aws:// URL.
		tokens := strings.Split(pdName, "/")
		awsVolumeID := tokens[len(tokens)-1]

		request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
		_, err := client.DeleteVolume(request)
		if err != nil {
			if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
				Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
			} else {
				return fmt.Errorf("error deleting EBS volumes: %v", err)
			}
		}
		return nil
	default:
		// Name the provider so a misconfigured test run is actionable.
		return fmt.Errorf("provider %q does not support volume deletion", TestContext.Provider)
	}
}

View File

@ -26,7 +26,6 @@ import (
"google.golang.org/api/googleapi"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
@ -38,8 +37,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/test/e2e/framework"
)
@ -48,8 +45,6 @@ const (
gcePDDetachPollTime = 10 * time.Second
nodeStatusTimeout = 1 * time.Minute
nodeStatusPollTime = 1 * time.Second
gcePDRetryTimeout = 5 * time.Minute
gcePDRetryPollTime = 5 * time.Second
maxReadRetry = 3
)
@ -82,7 +77,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@ -146,7 +141,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@ -210,7 +205,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("creating PD")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@ -262,7 +257,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("creating PD")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@ -314,7 +309,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
numContainers := 4
var host0Pod *v1.Pod
@ -367,10 +362,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD1")
disk1Name, err := createPDWithRetry()
disk1Name, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD1")
By("creating PD2")
disk2Name, err := createPDWithRetry()
disk2Name, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD2")
var host0Pod *v1.Pod
@ -430,7 +425,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Error getting group size")
By("Creating a pd")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating a pd")
host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
@ -483,7 +478,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
framework.ExpectNoError(err, "Error getting group size")
By("Creating a pd")
diskName, err := createPDWithRetry()
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating a pd")
host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
@ -531,33 +526,6 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
})
})
// createPDWithRetry provisions a new PD via createPD, retrying every
// gcePDRetryPollTime until gcePDRetryTimeout elapses. Returns the disk name
// on success, otherwise the last creation error.
func createPDWithRetry() (string, error) {
	newDiskName := ""
	var err error
	for start := time.Now(); time.Since(start) < gcePDRetryTimeout; time.Sleep(gcePDRetryPollTime) {
		if newDiskName, err = createPD(); err != nil {
			framework.Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
			continue
		}
		framework.Logf("Successfully created a new PD: %q.", newDiskName)
		break
	}
	return newDiskName, err
}
// deletePDWithRetry deletes the PD diskName via deletePD, retrying every
// gcePDRetryPollTime until gcePDRetryTimeout elapses. Fails the test if the
// last attempt still returned an error.
func deletePDWithRetry(diskName string) {
	var err error
	for start := time.Now(); time.Since(start) < gcePDRetryTimeout; time.Sleep(gcePDRetryPollTime) {
		if err = deletePD(diskName); err != nil {
			framework.Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
			continue
		}
		framework.Logf("Successfully deleted PD %q.", diskName)
		break
	}
	framework.ExpectNoError(err, "Error deleting PD")
}
func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
for filePath, expectedContents := range fileAndContentToVerify {
var value string
@ -585,82 +553,6 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
}
}
// createPD provisions a new persistent disk on the configured provider.
// GCE/GKE: returns the raw disk name; AWS: returns an
// "aws://<zone>/<volume-id>" URL. Other providers return an error.
func createPD() (string, error) {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

		gceCloud, err := framework.GetGCECloud()
		if err != nil {
			return "", err
		}

		tags := map[string]string{}
		err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else if framework.TestContext.Provider == "aws" {
		client := ec2.New(session.New())

		request := &ec2.CreateVolumeInput{}
		request.AvailabilityZone = aws.String(cloudConfig.Zone)
		request.Size = aws.Int64(10)
		request.VolumeType = aws.String(awscloud.DefaultVolumeType)
		response, err := client.CreateVolume(request)
		if err != nil {
			return "", err
		}

		az := aws.StringValue(response.AvailabilityZone)
		awsID := aws.StringValue(response.VolumeId)

		// Encode zone and volume ID into the aws:// URL form; deletePD
		// strips the last path segment back out.
		volumeName := "aws://" + az + "/" + awsID
		return volumeName, nil
	} else {
		return "", fmt.Errorf("Provider does not support volume creation")
	}
}
// deletePD deletes the persistent disk pdName on the configured provider.
// For AWS, pdName is the "aws://<zone>/<volume-id>" URL produced by createPD.
// A disk that no longer exists is treated as a successful delete.
func deletePD(pdName string) error {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		gceCloud, err := framework.GetGCECloud()
		if err != nil {
			return err
		}

		err = gceCloud.DeleteDisk(pdName)
		if err != nil {
			if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
				// PD was already deleted (or never existed); deletion
				// implicitly succeeded, so ignore the error.
				return nil
			}
			framework.Logf("Error deleting PD %q: %v", pdName, err)
		}
		return err
	} else if framework.TestContext.Provider == "aws" {
		client := ec2.New(session.New())

		// The EBS volume ID is the last path segment of the aws:// URL.
		tokens := strings.Split(pdName, "/")
		awsVolumeID := tokens[len(tokens)-1]

		request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
		_, err := client.DeleteVolume(request)
		if err != nil {
			if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
				framework.Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
			} else {
				return fmt.Errorf("error deleting EBS volumes: %v", err)
			}
		}
		return nil
	} else {
		return fmt.Errorf("Provider does not support volume deletion")
	}
}
func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := framework.GetGCECloud()
@ -806,7 +698,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
waitForPDDetach(diskName, host)
}
By(fmt.Sprintf("Deleting PD %q", diskName))
deletePDWithRetry(diskName)
framework.DeletePDWithRetry(diskName)
}
func waitForPDInVolumesInUse(

View File

@ -189,7 +189,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
pod := framework.MakePod(ns, pvc.Name)
pod := framework.MakePod(ns, pvc.Name, true, "")
pod.Spec.NodeName = nodeName
framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
pod, err := c.Core().Pods(ns).Create(pod)

View File

@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, pvc.Name, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod = framework.MakePod(ns, pvc.Name, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
@ -310,7 +310,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
framework.SkipUnlessProviderIs("gce")
By("Initializing Test Spec")
if diskName == "" {
diskName, err = createPDWithRetry()
diskName, err = framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "gce-",
@ -342,7 +342,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
AddCleanupAction(func() {
if len(diskName) > 0 {
deletePDWithRetry(diskName)
framework.DeletePDWithRetry(diskName)
}
})

View File

@ -13,6 +13,7 @@ go_library(
"configmaps.go",
"deployments.go",
"horizontal_pod_autoscalers.go",
"persistent_volumes.go",
"secrets.go",
"services.go",
"statefulset.go",

View File

@ -0,0 +1,98 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// PersistentVolumeUpgradeTest tests that a pv is available before and after a cluster upgrade.
type PersistentVolumeUpgradeTest struct {
	// pvSource is the backing disk source (a GCE PD; see createGCEVolume).
	pvSource *v1.PersistentVolumeSource
	// pv and pvc are the API objects created in Setup and removed in Teardown.
	pv  *v1.PersistentVolume
	pvc *v1.PersistentVolumeClaim
}

const (
	// pvTestFile is written before the upgrade and read back after it.
	pvTestFile string = "/mnt/pv_upgrade_test"
	// pvTestData is the file content expected to survive the upgrade.
	pvTestData string = "keep it pv"
	pvWriteCmd string = "echo \"" + pvTestData + "\" > " + pvTestFile
	pvReadCmd  string = "cat " + pvTestFile
)
// createGCEVolume provisions a new GCE PD and returns a PersistentVolumeSource
// referencing it. Fails the test if the disk cannot be created.
func (t *PersistentVolumeUpgradeTest) createGCEVolume() *v1.PersistentVolumeSource {
	diskName, err := framework.CreatePDWithRetry()
	framework.ExpectNoError(err)
	gcePD := &v1.GCEPersistentDiskVolumeSource{
		PDName:   diskName,
		FSType:   "ext3",
		ReadOnly: false,
	}
	return &v1.PersistentVolumeSource{GCEPersistentDisk: gcePD}
}
// deleteGCEVolume removes the GCE PD that backs the given volume source.
func (t *PersistentVolumeUpgradeTest) deleteGCEVolume(pvSource *v1.PersistentVolumeSource) {
	pdName := pvSource.GCEPersistentDisk.PDName
	framework.DeletePDWithRetry(pdName)
}
// Setup creates a pv and then verifies that a pod can consume it. The pod writes data to the volume.
func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {

	// TODO: generalize this to other providers
	framework.SkipUnlessProviderIs("gce", "gke")

	ns := f.Namespace.Name

	By("Initializing PV source")
	t.pvSource = t.createGCEVolume()
	pvConfig := framework.PersistentVolumeConfig{
		NamePrefix: "pv-upgrade",
		PVSource:   *t.pvSource,
		Prebind:    nil,
	}

	By("Creating the PV and PVC")
	// NOTE(review): last arg true presumably pre-binds the PVC to this exact
	// PV (elsewhere this helper is called with false) — confirm against
	// framework.CreatePVPVC.
	t.pv, t.pvc = framework.CreatePVPVC(f.ClientSet, pvConfig, ns, true)
	framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc)

	By("Consuming the PV before upgrade")
	// Write the sentinel data, then read it back in the same pod to prove
	// the volume is mounted and writable before the upgrade starts.
	t.testPod(f, pvWriteCmd+";"+pvReadCmd)
}
// Test waits for the upgrade to complete, and then verifies that a pod can still consume the pv
// and that the volume data persists.
func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	// Block until the upgrade driver signals completion on the done channel.
	<-done
	By("Consuming the PV after upgrade")
	// Read-only this time: the data written in Setup must have survived.
	t.testPod(f, pvReadCmd)
}
// Teardown cleans up any remaining resources.
func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
	// Remove the PV/PVC API objects first, then the backing GCE disk.
	framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, t.pv, t.pvc)
	t.deleteGCEVolume(t.pvSource)
}
// testPod creates an unprivileged pod that runs cmd against the claimed volume
// and verifies that the pod's output contains the expected test data.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
	pod := framework.MakePod(f.Namespace.Name, t.pvc.Name, false /* isPrivileged */, cmd)
	f.TestContainerOutput("pod consumes pv", pod, 0, []string{pvTestData})
}

View File

@ -815,11 +815,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
}
By("creating a test gce pd volume")
volumeName, err := createPDWithRetry()
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
deletePDWithRetry(volumeName)
framework.DeletePDWithRetry(volumeName)
}()
defer func() {