Add e2e tests for volumeMode of persistent volume
This set of e2e tests confirms that persistent volumes work well for all volumeModes. Coverage of the tests is shown in the [Test cases] table below. Once the implementation policy is confirmed to be good, we can add more plugins and test cases.

[Test cases]

 #  plugin  volumeMode  Test case                                             Expectation
--- ------- ----------- ----------------------------------------------------- -----------
 1  iSCSI   Block       (a) Create Pod with PV and confirm Read/Write to PV   Success
 2  iSCSI   FileSystem  (a) Create Pod with PV and confirm Read/Write to PV   Success
 3  RBD     Block       (a) Create Pod with PV and confirm Read/Write to PV   Success
 4  RBD     FileSystem  (a) Create Pod with PV and confirm Read/Write to PV   Success
 5  CephFS  Block       (a) Create Pod with PV and confirm Read/Write to PV   Fail
 6  CephFS  FileSystem  (a) Create Pod with PV and confirm Read/Write to PV   Success
 7  NFS     Block       (a) Create Pod with PV and confirm Read/Write to PV   Fail
 8  NFS     FileSystem  (a) Create Pod with PV and confirm Read/Write to PV   Success

fixes: #56803
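For context, volumeMode selects whether a PersistentVolume is exposed to the pod as a mounted filesystem (Filesystem, the default) or as a raw block device (Block). Below is a minimal sketch of the kind of statically bound PV/PVC pair these tests exercise, written against the core API types; the object names, capacity, and NFS source are illustrative only and are not taken from this change (the tests themselves build these objects through the framework helpers shown in the diff).

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePVAndPVC builds a statically provisioned PV/PVC pair with
// volumeMode set to Block. Switching blockMode to
// v1.PersistentVolumeFilesystem covers the other half of the test matrix.
// All names, the capacity, and the NFS server address are placeholders.
func examplePVAndPVC() (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
    blockMode := v1.PersistentVolumeBlock
    scName := "pvtest-example" // hypothetical StorageClass name

    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
        Spec: v1.PersistentVolumeSpec{
            Capacity:         v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Gi")},
            AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            StorageClassName: scName,
            VolumeMode:       &blockMode,
            PersistentVolumeSource: v1.PersistentVolumeSource{
                NFS: &v1.NFSVolumeSource{Server: "127.0.0.1", Path: "/"},
            },
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "pvc-example"},
        Spec: v1.PersistentVolumeClaimSpec{
            AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            StorageClassName: &scName,
            VolumeMode:       &blockMode,
            Resources: v1.ResourceRequirements{
                Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Gi")},
            },
        },
    }
    return pv, pvc
}

func main() {
    pv, pvc := examplePVAndPVC()
    fmt.Printf("pv=%s pvc=%s volumeMode=%s\n", pv.Name, pvc.Name, *pv.Spec.VolumeMode)
}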
parent 24ab69d358
commit 1b06ba5072
@@ -48,6 +48,12 @@ const (
    VolumeSelectorKey = "e2e-pv-pool"
)

var (
    // Common selinux labels
    SELinuxLabel = &v1.SELinuxOptions{
        Level: "s0:c0,c1"}
)

// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and if it is present. We must always Get the pv object before

@@ -15,6 +15,7 @@ go_library(
        "persistent_volumes.go",
        "persistent_volumes-gce.go",
        "persistent_volumes-local.go",
        "persistent_volumes-volumemode.go",
        "pv_protection.go",
        "pvc_protection.go",
        "regional_pd.go",

429 test/e2e/storage/persistent_volumes-volumemode.go (new file)
@@ -0,0 +1,429 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
    "fmt"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/storage/utils"
)

const (
    noProvisioner = "kubernetes.io/no-provisioner"
    pvNamePrefix  = "pv"
)

func generateConfigsForStaticProvisionPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
    volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
    framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
    // StorageClass
    scConfig := &storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name: scName,
        },
        Provisioner:       noProvisioner,
        VolumeBindingMode: &volBindMode,
    }
    // PV
    pvConfig := framework.PersistentVolumeConfig{
        PVSource:         pvSource,
        NamePrefix:       pvNamePrefix,
        StorageClassName: scName,
        VolumeMode:       &volMode,
    }
    // PVC
    pvcConfig := framework.PersistentVolumeClaimConfig{
        AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
        StorageClassName: &scName,
        VolumeMode:       &volMode,
    }

    return scConfig, pvConfig, pvcConfig
}

func createPVTestResource(cs clientset.Interface, ns string,
    scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig,
    pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {

    By("Creating sc")
    sc, err := cs.StorageV1().StorageClasses().Create(scConfig)
    Expect(err).NotTo(HaveOccurred())

    By("Creating pv and pvc")
    pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false)
    framework.ExpectNoError(err)
    framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc))

    By("Creating a pod")
    // TODO(mkimuram): Need to set anti-affinity with storage server pod.
    // Otherwise, storage server pod can also be affected on destructive tests.
    pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
    Expect(err).NotTo(HaveOccurred())

    return sc, pod, pv, pvc
}

func createPVTestResourceWithFailure(cs clientset.Interface, ns string,
    scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig,
    pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {

    By("Creating sc")
    sc, err := cs.StorageV1().StorageClasses().Create(scConfig)
    Expect(err).NotTo(HaveOccurred())

    By("Creating pv and pvc")
    pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false)
    framework.ExpectNoError(err)
    framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc))

    By("Creating a pod")
    pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
    Expect(err).To(HaveOccurred())

    return sc, pod, pv, pvc
}

func deletePVTestResource(f *framework.Framework, cs clientset.Interface, ns string, sc *storagev1.StorageClass,
    pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
    By("Deleting pod")
    framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))

    By("Deleting pv and pvc")
    errs := framework.PVPVCCleanup(cs, ns, pv, pvc)
    if len(errs) > 0 {
        framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
    }

    By("Deleting sc")
    framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(sc.Name, nil))
}

func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
    if volMode == v1.PersistentVolumeBlock {
        // Check if block exists
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))

        // Double check that it's not directory
        utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
    } else {
        // Check if directory exists
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))

        // Double check that it's not block
        utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
    }
}

func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
    if volMode == v1.PersistentVolumeBlock {
        // random -> file1
        utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
        // file1 -> dev (write to dev)
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
        // dev -> file2 (read from dev)
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
        // file1 == file2 (check contents)
        utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
        // Clean up temp files
        utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")

        // Check that writing file to block volume fails
        utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
    } else {
        // text -> file1 (write to file)
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
        // grep file1 (read from file and check contents)
        utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))

        // Check that writing to directory as block volume fails
        utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
    }
}

func skipBlockSupportTestIfUnsupported(volMode v1.PersistentVolumeMode, isBlockSupported bool) {
    if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
        framework.Skipf("Skip assertion for block test for block supported plugin. (Block unsupported)")
    }
}

func skipBlockUnsupportTestUnlessUnsupported(volMode v1.PersistentVolumeMode, isBlockSupported bool) {
    if !(volMode == v1.PersistentVolumeBlock && !isBlockSupported) {
        framework.Skipf("Skip assertion for block test for block unsupported plugin. (Block supported or FileSystem test)")
    }
}

var _ = utils.SIGDescribe("PersistentVolumes-volumeMode", func() {
    f := framework.NewDefaultFramework("pv-volmode")

    const (
        pvTestSCPrefix = "pvtest"
    )

    var (
        cs               clientset.Interface
        ns               string
        scName           string
        isBlockSupported bool
        serverIP         string
        secret           *v1.Secret
        serverPod        *v1.Pod
        pvSource         v1.PersistentVolumeSource
        sc               *storagev1.StorageClass
        pod              *v1.Pod
        pv               *v1.PersistentVolume
        pvc              *v1.PersistentVolumeClaim
        volMode          v1.PersistentVolumeMode
        volBindMode      storagev1.VolumeBindingMode
    )

    BeforeEach(func() {
        cs = f.ClientSet
        ns = f.Namespace.Name
        volBindMode = storagev1.VolumeBindingImmediate
    })

    AssertCreateDeletePodAndReadWriteVolume := func() {
        // For block supported plugins
        It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
            skipBlockSupportTestIfUnsupported(volMode, isBlockSupported)

            scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource)
            sc, pod, pv, pvc = createPVTestResource(cs, ns, scConfig, pvConfig, pvcConfig)
            defer deletePVTestResource(f, cs, ns, sc, pod, pv, pvc)

            By("Checking if persistent volume exists as expected volume mode")
            checkVolumeModeOfPath(pod, volMode, "/mnt/volume1")

            By("Checking if read/write to persistent volume works properly")
            checkReadWriteToPath(pod, volMode, "/mnt/volume1")
        })

        // For block unsupported plugins
        It("should fail to create pod by failing to mount volume", func() {
            skipBlockUnsupportTestUnlessUnsupported(volMode, isBlockSupported)

            scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource)
            sc, pod, pv, pvc = createPVTestResourceWithFailure(cs, ns, scConfig, pvConfig, pvcConfig)
            deletePVTestResource(f, cs, ns, sc, pod, pv, pvc)
        })
    }

    verifyAll := func() {
        AssertCreateDeletePodAndReadWriteVolume()
        // TODO(mkimuram): Add more tests
    }

    Describe("NFS", func() {
        const pvTestNFSSCSuffix = "nfs"

        BeforeEach(func() {
            isBlockSupported = false
            scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestNFSSCSuffix)
            _, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})

            pvSource = v1.PersistentVolumeSource{
                NFS: &v1.NFSVolumeSource{
                    Server:   serverIP,
                    Path:     "/",
                    ReadOnly: false,
                },
            }
        })

        AfterEach(func() {
            framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
            err := framework.DeletePodWithWait(f, cs, serverPod)
            Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
        })

        Context("FileSystem volume Test", func() {
            BeforeEach(func() {
                volMode = v1.PersistentVolumeFilesystem
            })

            verifyAll()
        })

        Context("Block volume Test[Feature:BlockVolume]", func() {
            BeforeEach(func() {
                volMode = v1.PersistentVolumeBlock
            })

            verifyAll()
        })
    })

Describe("iSCSI [Feature:Volumes]", func() {
|
||||
const pvTestISCSISCSuffix = "iscsi"
|
||||
|
||||
BeforeEach(func() {
|
||||
isBlockSupported = true
|
||||
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestISCSISCSuffix)
|
||||
_, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
|
||||
|
||||
pvSource = v1.PersistentVolumeSource{
|
||||
ISCSI: &v1.ISCSIPersistentVolumeSource{
|
||||
TargetPortal: serverIP + ":3260",
|
||||
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
|
||||
Lun: 0,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
|
||||
err := framework.DeletePodWithWait(f, cs, serverPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
|
||||
})
|
||||
|
||||
Context("FileSystem volume Test", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeFilesystem
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
|
||||
Context("Block volume Test[Feature:BlockVolume]", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeBlock
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Ceph-RBD [Feature:Volumes]", func() {
|
||||
const pvTestRBDSCSuffix = "rbd"
|
||||
|
||||
BeforeEach(func() {
|
||||
isBlockSupported = true
|
||||
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestRBDSCSuffix)
|
||||
_, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
|
||||
|
||||
framework.Logf("namespace: %v, secret.Name: %v", ns, secret.Name)
|
||||
pvSource = v1.PersistentVolumeSource{
|
||||
RBD: &v1.RBDPersistentVolumeSource{
|
||||
CephMonitors: []string{serverIP},
|
||||
RBDPool: "rbd",
|
||||
RBDImage: "foo",
|
||||
RadosUser: "admin",
|
||||
SecretRef: &v1.SecretReference{
|
||||
Name: secret.Name,
|
||||
Namespace: ns,
|
||||
},
|
||||
ReadOnly: false,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
|
||||
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
|
||||
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
|
||||
err := framework.DeletePodWithWait(f, cs, serverPod)
|
||||
if secErr != nil || err != nil {
|
||||
if secErr != nil {
|
||||
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
|
||||
}
|
||||
if err != nil {
|
||||
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
|
||||
}
|
||||
framework.Failf("AfterEach: cleanup failed")
|
||||
}
|
||||
})
|
||||
|
||||
Context("FileSystem volume Test", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeFilesystem
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
|
||||
Context("Block volume Test[Feature:BlockVolume]", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeBlock
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CephFS [Feature:Volumes]", func() {
|
||||
const pvTestCephFSSCSuffix = "cephfs"
|
||||
|
||||
BeforeEach(func() {
|
||||
isBlockSupported = false
|
||||
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestCephFSSCSuffix)
|
||||
_, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
|
||||
|
||||
pvSource = v1.PersistentVolumeSource{
|
||||
CephFS: &v1.CephFSPersistentVolumeSource{
|
||||
Monitors: []string{serverIP + ":6789"},
|
||||
User: "kube",
|
||||
SecretRef: &v1.SecretReference{
|
||||
Name: secret.Name,
|
||||
Namespace: ns,
|
||||
},
|
||||
ReadOnly: false,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: deleting CephFS server secret %q...", secret.Name)
|
||||
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
|
||||
framework.Logf("AfterEach: deleting CephFS server pod %q...", serverPod.Name)
|
||||
err := framework.DeletePodWithWait(f, cs, serverPod)
|
||||
if secErr != nil || err != nil {
|
||||
if secErr != nil {
|
||||
framework.Logf("AfterEach: CephFS delete secret failed: %v", secErr)
|
||||
}
|
||||
if err != nil {
|
||||
framework.Logf("AfterEach: CephFS server pod delete failed: %v", err)
|
||||
}
|
||||
framework.Failf("AfterEach: cleanup failed")
|
||||
}
|
||||
})
|
||||
|
||||
Context("FileSystem volume Test", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeFilesystem
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
|
||||
Context("Block volume Test[Feature:BlockVolume]", func() {
|
||||
BeforeEach(func() {
|
||||
volMode = v1.PersistentVolumeBlock
|
||||
})
|
||||
|
||||
verifyAll()
|
||||
})
|
||||
})
|
||||
|
||||
})
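As the commit message notes, more plugins can be added once the approach settles. Each per-plugin Describe above follows the same shape, so a new plugin would mostly need its own BeforeEach (and matching AfterEach cleanup). The following is a rough sketch of such an addition; the plugin name, block-support flag, and server setup are hypothetical placeholders, and the block would live inside the SIGDescribe body shown above, not as standalone code.

    // Hypothetical example of extending the suite with another plugin.
    Describe("ExamplePlugin [Feature:Volumes]", func() {
        const pvTestExampleSCSuffix = "example" // hypothetical suffix

        BeforeEach(func() {
            isBlockSupported = true // set per the plugin's block-volume support
            scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestExampleSCSuffix)
            // Start the plugin's storage server here and fill in pvSource,
            // mirroring the NFS/iSCSI/RBD/CephFS BeforeEach blocks above.
        })

        Context("FileSystem volume Test", func() {
            BeforeEach(func() {
                volMode = v1.PersistentVolumeFilesystem
            })

            verifyAll()
        })

        Context("Block volume Test[Feature:BlockVolume]", func() {
            BeforeEach(func() {
                volMode = v1.PersistentVolumeBlock
            })

            verifyAll()
        })
    })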

@@ -19,6 +19,7 @@ go_library(
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

@@ -27,6 +27,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    uexec "k8s.io/utils/exec"
)

type KubeletOpt string

@@ -43,6 +44,41 @@ func PodExec(pod *v1.Pod, bashExec string) (string, error) {
    return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}

// VerifyExecInPodSucceed verifies that the bash command in the target pod succeeds.
func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
    _, err := PodExec(pod, bashExec)
    if err != nil {
        if err, ok := err.(uexec.CodeExitError); ok {
            exitCode := err.ExitStatus()
            Expect(err).NotTo(HaveOccurred(),
                "%q should succeed, but failed with exit code %d and error message %q",
                bashExec, exitCode, err)
        } else {
            Expect(err).NotTo(HaveOccurred(),
                "%q should succeed, but failed with error message %q",
                bashExec, err)
        }
    }
}

// VerifyExecInPodFail verifies that the bash command in the target pod fails with the given exit code.
func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
    _, err := PodExec(pod, bashExec)
    if err != nil {
        if err, ok := err.(uexec.CodeExitError); ok {
            actualExitCode := err.ExitStatus()
            Expect(actualExitCode).To(Equal(exitCode),
                "%q should fail with exit code %d, but failed with exit code %d and error message %q",
                bashExec, exitCode, actualExitCode, err)
        } else {
            Expect(err).NotTo(HaveOccurred(),
                "%q should fail with exit code %d, but failed with error message %q",
                bashExec, exitCode, err)
        }
    }
    Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exited without error", bashExec, exitCode)
}
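// Example usage (mirroring checkVolumeModeOfPath in the new volume-mode tests
// above; the mount path is illustrative): assert that a mount point is a block
// device and explicitly not a directory.
//
//    VerifyExecInPodSucceed(pod, "test -b /mnt/volume1")
//    VerifyExecInPodFail(pod, "test -d /mnt/volume1", 1)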

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`