Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)
Enhance and refactor volume test util functions

Fix execution of bash commands in MakePod(). Add isPrivileged as a parameter to MakePod(). Move the PD utility functions into pv_util.go. Ran all the tests in pd.go, persistent_volumes.go, and persistent_volumes-disruptive.go. These changes are needed for the PV upgrade test I am working on.
This commit is contained in:
parent: c4e3ed51bc
commit: c522b77505
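Before the diff itself, a minimal sketch of how a caller might use the refactored helpers after this commit; the function exampleUsage, the namespace, the PVC name, and the command string are hypothetical placeholders, only the signatures and package locations come from the diff below.

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// exampleUsage is a hypothetical illustration, not part of the commit.
func exampleUsage(ns, pvcName string) {
    // MakePod now takes an isPrivileged flag and a single command string;
    // an empty command falls back to an infinite sleep loop in the pod.
    pod := framework.MakePod(ns, pvcName, true /* isPrivileged */, "touch /mnt/SUCCESS")
    _ = pod

    // The PD create/delete helpers moved from test/e2e/pd.go into the framework package.
    diskName, err := framework.CreatePDWithRetry()
    framework.ExpectNoError(err, "Error creating PD")
    defer framework.DeletePDWithRetry(diskName)
}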
@@ -159,7 +159,6 @@ go_library(
         "//test/images/net/nat:go_default_library",
         "//test/utils:go_default_library",
         "//vendor:github.com/aws/aws-sdk-go/aws",
-        "//vendor:github.com/aws/aws-sdk-go/aws/awserr",
         "//vendor:github.com/aws/aws-sdk-go/aws/session",
         "//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
         "//vendor:github.com/aws/aws-sdk-go/service/ec2",
@@ -54,6 +54,7 @@ go_library(
         "//pkg/client/conditions:go_default_library",
         "//pkg/client/unversioned/remotecommand:go_default_library",
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/cloudprovider/providers/aws:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
@@ -76,6 +77,10 @@ go_library(
         "//test/e2e/generated:go_default_library",
         "//test/e2e/perftype:go_default_library",
         "//test/utils:go_default_library",
+        "//vendor:github.com/aws/aws-sdk-go/aws",
+        "//vendor:github.com/aws/aws-sdk-go/aws/awserr",
+        "//vendor:github.com/aws/aws-sdk-go/aws/session",
+        "//vendor:github.com/aws/aws-sdk-go/service/ec2",
         "//vendor:github.com/golang/glog",
         "//vendor:github.com/google/cadvisor/info/v1",
         "//vendor:github.com/onsi/ginkgo",
@@ -18,20 +18,34 @@ package framework
 
 import (
     "fmt"
+    "strings"
     "time"
 
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/ec2"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
+    "google.golang.org/api/googleapi"
     apierrs "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
     "k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )
 
+const (
+    PDRetryTimeout  = 5 * time.Minute
+    PDRetryPollTime = 5 * time.Second
+)
+
 // Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
 // guaranteed to be unique. The value is {} (empty struct) since we're only interested
 // in the PV's name and if it is present. We must always Get the pv object before
@@ -506,17 +520,16 @@ func MakePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
 // Returns a pod definition based on the namespace. The pod references the PVC's
 // name.
 func MakeWritePod(ns string, pvcName string) *v1.Pod {
-    return MakePod(ns, pvcName, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
+    return MakePod(ns, pvcName, true, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
 }
 
 // Returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod
-func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
+func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.Pod {
 
     if len(command) == 0 {
-        command = []string{"while true; do sleep 1; done"}
+        command = "while true; do sleep 1; done"
     }
-    var isPrivileged bool = true
     return &v1.Pod{
         TypeMeta: metav1.TypeMeta{
             Kind: "Pod",
@@ -531,8 +544,8 @@ func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
             {
                 Name:  "write-pod",
                 Image: "gcr.io/google_containers/busybox:1.24",
-                Command: []string{"/bin/sh", "-c"},
-                Args:    command,
+                Command: []string{"/bin/sh"},
+                Args:    []string{"-c", command},
                 VolumeMounts: []v1.VolumeMount{
                     {
                         Name:      pvcName,
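Taken together with the signature change above, the write-pod container now always receives exactly one shell command string: the -c flag moves from Command into Args, and the command string is its single operand. A minimal sketch of the resulting argv, using the MakeWritePod command from this diff as the example string:

// What the container effectively execs after this change:
//   argv = ["/bin/sh", "-c", command]
// e.g. for the MakeWritePod command string shown in this diff:
argv := []string{"/bin/sh", "-c", "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')"}
_ = argv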
@@ -561,7 +574,7 @@ func MakePod(ns string, pvcName string, command ...string) *v1.Pod {
 
 // Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
-    clientPod := MakePod(ns, pvc.Name)
+    clientPod := MakePod(ns, pvc.Name, true, "")
     clientPod, err := c.Core().Pods(ns).Create(clientPod)
     Expect(err).NotTo(HaveOccurred())
 
@@ -572,3 +585,106 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
     Expect(apierrs.IsNotFound(err)).To(BeFalse())
     return clientPod
 }
+
+func CreatePDWithRetry() (string, error) {
+    newDiskName := ""
+    var err error
+    for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
+        if newDiskName, err = createPD(); err != nil {
+            Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
+            continue
+        }
+        Logf("Successfully created a new PD: %q.", newDiskName)
+        break
+    }
+    return newDiskName, err
+}
+
+func DeletePDWithRetry(diskName string) {
+    var err error
+    for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
+        if err = deletePD(diskName); err != nil {
+            Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
+            continue
+        }
+        Logf("Successfully deleted PD %q.", diskName)
+        break
+    }
+    ExpectNoError(err, "Error deleting PD")
+}
+
+func createPD() (string, error) {
+    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
+        pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
+
+        gceCloud, err := GetGCECloud()
+        if err != nil {
+            return "", err
+        }
+
+        tags := map[string]string{}
+        err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
+        if err != nil {
+            return "", err
+        }
+        return pdName, nil
+    } else if TestContext.Provider == "aws" {
+        client := ec2.New(session.New())
+
+        request := &ec2.CreateVolumeInput{}
+        request.AvailabilityZone = aws.String(TestContext.CloudConfig.Zone)
+        request.Size = aws.Int64(10)
+        request.VolumeType = aws.String(awscloud.DefaultVolumeType)
+        response, err := client.CreateVolume(request)
+        if err != nil {
+            return "", err
+        }
+
+        az := aws.StringValue(response.AvailabilityZone)
+        awsID := aws.StringValue(response.VolumeId)
+
+        volumeName := "aws://" + az + "/" + awsID
+        return volumeName, nil
+    } else {
+        return "", fmt.Errorf("Provider does not support volume creation")
+    }
+}
+
+func deletePD(pdName string) error {
+    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
+        gceCloud, err := GetGCECloud()
+        if err != nil {
+            return err
+        }
+
+        err = gceCloud.DeleteDisk(pdName)
+
+        if err != nil {
+            if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
+                // PD already exists, ignore error.
+                return nil
+            }
+
+            Logf("Error deleting PD %q: %v", pdName, err)
+        }
+        return err
+    } else if TestContext.Provider == "aws" {
+        client := ec2.New(session.New())
+
+        tokens := strings.Split(pdName, "/")
+        awsVolumeID := tokens[len(tokens)-1]
+
+        request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
+        _, err := client.DeleteVolume(request)
+        if err != nil {
+            if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
+                Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
+            } else {
+                return fmt.Errorf("error deleting EBS volumes: %v", err)
+            }
+        }
+        return nil
+    } else {
+        return fmt.Errorf("Provider does not support volume deletion")
+    }
+}
test/e2e/pd.go (128 lines changed)
@@ -26,7 +26,6 @@ import (
     "google.golang.org/api/googleapi"
 
     "github.com/aws/aws-sdk-go/aws"
-    "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/aws/aws-sdk-go/service/ec2"
     . "github.com/onsi/ginkgo"
@@ -38,8 +37,6 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
-    awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
-    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
     "k8s.io/kubernetes/test/e2e/framework"
 )
 
@@ -48,8 +45,6 @@ const (
     gcePDDetachPollTime = 10 * time.Second
     nodeStatusTimeout   = 1 * time.Minute
     nodeStatusPollTime  = 1 * time.Second
-    gcePDRetryTimeout   = 5 * time.Minute
-    gcePDRetryPollTime  = 5 * time.Second
     maxReadRetry        = 3
 )
 
@@ -82,7 +77,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke", "aws")
 
         By("creating PD")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD")
 
         host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -146,7 +141,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke", "aws")
 
         By("creating PD")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD")
 
         host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -210,7 +205,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke")
 
         By("creating PD")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD")
 
         rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -262,7 +257,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke")
 
         By("creating PD")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD")
 
         rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -314,7 +309,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke", "aws")
 
         By("creating PD")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD")
         numContainers := 4
         var host0Pod *v1.Pod
@@ -367,10 +362,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessProviderIs("gce", "gke", "aws")
 
         By("creating PD1")
-        disk1Name, err := createPDWithRetry()
+        disk1Name, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD1")
         By("creating PD2")
-        disk2Name, err := createPDWithRetry()
+        disk2Name, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating PD2")
         var host0Pod *v1.Pod
 
@@ -430,7 +425,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.ExpectNoError(err, "Error getting group size")
 
         By("Creating a pd")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating a pd")
 
         host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
@@ -483,7 +478,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
         framework.ExpectNoError(err, "Error getting group size")
         By("Creating a pd")
-        diskName, err := createPDWithRetry()
+        diskName, err := framework.CreatePDWithRetry()
         framework.ExpectNoError(err, "Error creating a pd")
 
         host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
@@ -531,33 +526,6 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
     })
 })
 
-func createPDWithRetry() (string, error) {
-    newDiskName := ""
-    var err error
-    for start := time.Now(); time.Since(start) < gcePDRetryTimeout; time.Sleep(gcePDRetryPollTime) {
-        if newDiskName, err = createPD(); err != nil {
-            framework.Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
-            continue
-        }
-        framework.Logf("Successfully created a new PD: %q.", newDiskName)
-        break
-    }
-    return newDiskName, err
-}
-
-func deletePDWithRetry(diskName string) {
-    var err error
-    for start := time.Now(); time.Since(start) < gcePDRetryTimeout; time.Sleep(gcePDRetryPollTime) {
-        if err = deletePD(diskName); err != nil {
-            framework.Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
-            continue
-        }
-        framework.Logf("Successfully deleted PD %q.", diskName)
-        break
-    }
-    framework.ExpectNoError(err, "Error deleting PD")
-}
-
 func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
     for filePath, expectedContents := range fileAndContentToVerify {
         var value string
@@ -585,82 +553,6 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
     }
 }
 
-func createPD() (string, error) {
-    if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-        pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))
-
-        gceCloud, err := framework.GetGCECloud()
-        if err != nil {
-            return "", err
-        }
-
-        tags := map[string]string{}
-        err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
-        if err != nil {
-            return "", err
-        }
-        return pdName, nil
-    } else if framework.TestContext.Provider == "aws" {
-        client := ec2.New(session.New())
-
-        request := &ec2.CreateVolumeInput{}
-        request.AvailabilityZone = aws.String(cloudConfig.Zone)
-        request.Size = aws.Int64(10)
-        request.VolumeType = aws.String(awscloud.DefaultVolumeType)
-        response, err := client.CreateVolume(request)
-        if err != nil {
-            return "", err
-        }
-
-        az := aws.StringValue(response.AvailabilityZone)
-        awsID := aws.StringValue(response.VolumeId)
-
-        volumeName := "aws://" + az + "/" + awsID
-        return volumeName, nil
-    } else {
-        return "", fmt.Errorf("Provider does not support volume creation")
-    }
-}
-
-func deletePD(pdName string) error {
-    if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-        gceCloud, err := framework.GetGCECloud()
-        if err != nil {
-            return err
-        }
-
-        err = gceCloud.DeleteDisk(pdName)
-
-        if err != nil {
-            if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
-                // PD already exists, ignore error.
-                return nil
-            }
-
-            framework.Logf("Error deleting PD %q: %v", pdName, err)
-        }
-        return err
-    } else if framework.TestContext.Provider == "aws" {
-        client := ec2.New(session.New())
-
-        tokens := strings.Split(pdName, "/")
-        awsVolumeID := tokens[len(tokens)-1]
-
-        request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
-        _, err := client.DeleteVolume(request)
-        if err != nil {
-            if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
-                framework.Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
-            } else {
-                return fmt.Errorf("error deleting EBS volumes: %v", err)
-            }
-        }
-        return nil
-    } else {
-        return fmt.Errorf("Provider does not support volume deletion")
-    }
-}
-
 func detachPD(nodeName types.NodeName, pdName string) error {
     if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
         gceCloud, err := framework.GetGCECloud()
@@ -806,7 +698,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
         waitForPDDetach(diskName, host)
     }
     By(fmt.Sprintf("Deleting PD %q", diskName))
-    deletePDWithRetry(diskName)
+    framework.DeletePDWithRetry(diskName)
 }
 
 func waitForPDInVolumesInUse(
@@ -189,7 +189,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
 func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
     pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
-    pod := framework.MakePod(ns, pvc.Name)
+    pod := framework.MakePod(ns, pvc.Name, true, "")
     pod.Spec.NodeName = nodeName
     framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
     pod, err := c.Core().Pods(ns).Create(pod)
@@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
         // If a file is detected in /mnt, fail the pod and do not restart it.
         By("Verifying the mount has been cleaned.")
         mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
-        pod = framework.MakePod(ns, pvc.Name, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
+        pod = framework.MakePod(ns, pvc.Name, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
 
         pod, err = c.Core().Pods(ns).Create(pod)
         Expect(err).NotTo(HaveOccurred())
@@ -310,7 +310,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
         framework.SkipUnlessProviderIs("gce")
         By("Initializing Test Spec")
         if diskName == "" {
-            diskName, err = createPDWithRetry()
+            diskName, err = framework.CreatePDWithRetry()
             Expect(err).NotTo(HaveOccurred())
             pvConfig = framework.PersistentVolumeConfig{
                 NamePrefix: "gce-",
@@ -342,7 +342,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
 
     AddCleanupAction(func() {
         if len(diskName) > 0 {
-            deletePDWithRetry(diskName)
+            framework.DeletePDWithRetry(diskName)
         }
     })
 
@@ -815,11 +815,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
         }
 
         By("creating a test gce pd volume")
-        volumeName, err := createPDWithRetry()
+        volumeName, err := framework.CreatePDWithRetry()
         Expect(err).NotTo(HaveOccurred())
 
         defer func() {
-            deletePDWithRetry(volumeName)
+            framework.DeletePDWithRetry(volumeName)
         }()
 
         defer func() {