Mirror of https://github.com/k3s-io/kubernetes.git
Move disruptive tests to testsuites and add ones for block volume
commit f1d2d9d670
parent acdbbb3993
@@ -2669,6 +2669,36 @@ func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddr
     return
 }
 
+// GetHostAddress gets the node for a pod and returns the first
+// address. Returns an error if the node the pod is on doesn't have an
+// address.
+func GetHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
+    node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
+    if err != nil {
+        return "", err
+    }
+    // Try externalAddress first
+    for _, address := range node.Status.Addresses {
+        if address.Type == v1.NodeExternalIP {
+            if address.Address != "" {
+                return address.Address, nil
+            }
+        }
+    }
+    // If no externalAddress found, try internalAddress
+    for _, address := range node.Status.Addresses {
+        if address.Type == v1.NodeInternalIP {
+            if address.Address != "" {
+                return address.Address, nil
+            }
+        }
+    }
+
+    // If not found, return error
+    return "", fmt.Errorf("No address for pod %v on node %v",
+        p.Name, p.Spec.NodeName)
+}
+
 type extractRT struct {
     http.Header
 }
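Note on the new helper: GetHostAddress prefers a node's ExternalIP and only falls back to a non-empty InternalIP, which is presumably why the callers switched over below (KubeletCommand and the unmount/unmap tests) can still SSH to nodes in clusters that never report an external address. A minimal standalone sketch of that selection order, using a hypothetical addr type rather than the real v1.NodeAddress values:

package main

import (
    "errors"
    "fmt"
)

// addr is a stand-in for v1.NodeAddress, purely for this illustration.
type addr struct{ typ, ip string }

// pickHostAddress mirrors the fallback order of GetHostAddress:
// first a non-empty ExternalIP, then a non-empty InternalIP, else an error.
func pickHostAddress(addrs []addr) (string, error) {
    for _, want := range []string{"ExternalIP", "InternalIP"} {
        for _, a := range addrs {
            if a.typ == want && a.ip != "" {
                return a.ip, nil
            }
        }
    }
    return "", errors.New("no usable address")
}

func main() {
    // A node that only reports an internal address still yields a usable IP.
    fmt.Println(pickHostAddress([]addr{{"InternalIP", "10.0.0.7"}}))
}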
@@ -49,6 +49,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
     testsuites.InitProvisioningTestSuite,
     testsuites.InitSnapshottableTestSuite,
     testsuites.InitMultiVolumeTestSuite,
+    testsuites.InitDisruptiveTestSuite,
 }
 
 // This executes testSuites for csi volumes.
test/e2e/storage/external/external.go (vendored, 1 addition)
@@ -49,6 +49,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
     testsuites.InitVolumeModeTestSuite,
     testsuites.InitVolumesTestSuite,
     testsuites.InitVolumeExpandTestSuite,
+    testsuites.InitDisruptiveTestSuite,
 }
 
 func init() {
@@ -102,7 +102,9 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
     framework.ExpectEqual(len(pvs), 1)
 
     ginkgo.By("Creating a pod with dynamically provisioned volume")
-    pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
+    pod, err := framework.CreateSecPod(c, ns, pvcClaims,
+        false, "", false, false, framework.SELinuxLabel,
+        nil, framework.PodStartTimeout)
     framework.ExpectNoError(err, "While creating pods for kubelet restart test")
     return pod, pvc, pvs[0]
 }
@@ -57,6 +57,7 @@ var testSuites = []func() testsuites.TestSuite{
     testsuites.InitProvisioningTestSuite,
     testsuites.InitMultiVolumeTestSuite,
     testsuites.InitVolumeExpandTestSuite,
+    testsuites.InitDisruptiveTestSuite,
 }
 
 // This executes testSuites for in-tree volumes.
@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "base.go",
+        "disruptive.go",
         "driveroperations.go",
         "ephemeral.go",
         "multivolume.go",
test/e2e/storage/testsuites/disruptive.go (new file, 154 lines)
@@ -0,0 +1,154 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+    "github.com/onsi/ginkgo"
+    "k8s.io/api/core/v1"
+    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
+    "k8s.io/kubernetes/test/e2e/storage/utils"
+)
+
+type disruptiveTestSuite struct {
+    tsInfo TestSuiteInfo
+}
+
+var _ TestSuite = &disruptiveTestSuite{}
+
+// InitDisruptiveTestSuite returns a disruptiveTestSuite that implements the TestSuite interface
+func InitDisruptiveTestSuite() TestSuite {
+    return &disruptiveTestSuite{
+        tsInfo: TestSuiteInfo{
+            name:       "disruptive",
+            featureTag: "[Disruptive]",
+            testPatterns: []testpatterns.TestPattern{
+                testpatterns.DefaultFsInlineVolume,
+                testpatterns.FsVolModePreprovisionedPV,
+                testpatterns.FsVolModeDynamicPV,
+                testpatterns.BlockVolModePreprovisionedPV,
+                testpatterns.BlockVolModeDynamicPV,
+            },
+        },
+    }
+}
+
+func (s *disruptiveTestSuite) getTestSuiteInfo() TestSuiteInfo {
+    return s.tsInfo
+}
+
+func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+    type local struct {
+        config      *PerTestConfig
+        testCleanup func()
+
+        cs clientset.Interface
+        ns *v1.Namespace
+
+        // genericVolumeTestResource contains pv, pvc, sc, etc., owns cleaning that up
+        resource *genericVolumeTestResource
+        pod      *v1.Pod
+    }
+    var l local
+
+    // No preconditions to test. Normally they would be in a BeforeEach here.
+
+    // This intentionally comes after checking the preconditions because it
+    // registers its own BeforeEach which creates the namespace. Beware that it
+    // also registers an AfterEach which renders f unusable. Any code using
+    // f must run inside an It or Context callback.
+    f := framework.NewDefaultFramework("disruptive")
+
+    init := func() {
+        l = local{}
+        l.ns = f.Namespace
+        l.cs = f.ClientSet
+
+        // Now do the more expensive test initialization.
+        l.config, l.testCleanup = driver.PrepareTest(f)
+
+        if pattern.VolMode == v1.PersistentVolumeBlock && !driver.GetDriverInfo().Capabilities[CapBlock] {
+            framework.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode)
+        }
+
+        l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+    }
+
+    cleanup := func() {
+        if l.pod != nil {
+            ginkgo.By("Deleting pod")
+            err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
+            framework.ExpectNoError(err, "while deleting pod")
+            l.pod = nil
+        }
+
+        if l.resource != nil {
+            l.resource.cleanupResource()
+            l.resource = nil
+        }
+
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
+        }
+    }
+
+    type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
+    type disruptiveTest struct {
+        testItStmt   string
+        runTestFile  testBody
+        runTestBlock testBody
+    }
+    disruptiveTestTable := []disruptiveTest{
+        {
+            testItStmt:   "Should test that pv written before kubelet restart is readable after restart.",
+            runTestFile:  utils.TestKubeletRestartsAndRestoresMount,
+            runTestBlock: utils.TestKubeletRestartsAndRestoresMap,
+        },
+        {
+            testItStmt:   "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.",
+            runTestFile:  utils.TestVolumeUnmountsFromDeletedPod,
+            runTestBlock: utils.TestVolumeUnmapsFromDeletedPod,
+        },
+        {
+            testItStmt:   "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.",
+            runTestFile:  utils.TestVolumeUnmountsFromForceDeletedPod,
+            runTestBlock: utils.TestVolumeUnmapsFromForceDeletedPod,
+        },
+    }
+
+    for _, test := range disruptiveTestTable {
+        func(t disruptiveTest) {
+            ginkgo.It(t.testItStmt, func() {
+                init()
+                defer cleanup()
+
+                var err error
+                ginkgo.By("Creating a pod with pvc")
+                l.pod, err = framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+                framework.ExpectNoError(err, "While creating pods for kubelet restart test")
+
+                if pattern.VolMode == v1.PersistentVolumeBlock {
+                    t.runTestBlock(l.cs, l.config.Framework, l.pod)
+                } else {
+                    t.runTestFile(l.cs, l.config.Framework, l.pod)
+                }
+            })
+        }(test)
+    }
+}
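A small Go detail in the loop above: each table row is handed to an immediately invoked func(t disruptiveTest) before ginkgo.It registers its closure, so every It keeps its own copy of the row. Capturing the loop variable directly would leave all three registered tests seeing the last row, because the It bodies only run after the loop has finished (pre-Go 1.22 loop-variable semantics, which is what this code targets). A self-contained sketch of the same idiom with hypothetical names:

package main

import "fmt"

func main() {
    tests := []string{"restart", "delete", "force-delete"}
    var registered []func()

    for _, test := range tests {
        // Passing test into an immediately invoked function gives each closure
        // its own copy; capturing test directly would leave all three closures
        // printing "force-delete" once the loop has finished (pre-Go 1.22).
        func(t string) {
            registered = append(registered, func() { fmt.Println(t) })
        }(test)
    }

    for _, run := range registered {
        run() // prints restart, delete, force-delete in order
    }
}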
@@ -102,6 +102,16 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
     framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
 }
 
+func isSudoPresent(nodeIP string, provider string) bool {
+    e2elog.Logf("Checking if sudo command is present")
+    sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
+    framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
+    if !strings.Contains(sshResult.Stderr, "command not found") {
+        return true
+    }
+    return false
+}
+
 // KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
 // for the desired statues..
 // - First issues the command via `systemctl`
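The sudo probe that used to be inlined in KubeletCommand is hoisted into isSudoPresent so the new block-volume helpers further down can reuse it. It treats sudo as present unless stderr of the probe contains "command not found". A rough local approximation of the same stderr-based probe, using os/exec instead of the e2essh API (illustration only):

package main

import (
    "bytes"
    "fmt"
    "os/exec"
    "strings"
)

// hasCommand approximates the probe locally: run "<name> --version" through a
// shell and treat a "command not found" complaint on stderr as absence.
// (bash prints "command not found"; other shells may word it differently.)
func hasCommand(name string) bool {
    var stderr bytes.Buffer
    cmd := exec.Command("bash", "-c", name+" --version")
    cmd.Stderr = &stderr
    _ = cmd.Run() // the exit status alone is ambiguous, so inspect stderr
    return !strings.Contains(stderr.String(), "command not found")
}

func main() {
    fmt.Println("sudo present:", hasCommand("sudo"))
}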
@@ -110,23 +120,15 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
 // Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
 func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
     command := ""
-    sudoPresent := false
     systemctlPresent := false
     kubeletPid := ""
 
-    nodeIP, err := framework.GetHostExternalAddress(c, pod)
+    nodeIP, err := framework.GetHostAddress(c, pod)
     framework.ExpectNoError(err)
     nodeIP = nodeIP + ":22"
 
-    e2elog.Logf("Checking if sudo command is present")
-    sshResult, err := e2essh.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
-    framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
-    if !strings.Contains(sshResult.Stderr, "command not found") {
-        sudoPresent = true
-    }
-
     e2elog.Logf("Checking if systemctl command is present")
-    sshResult, err = e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
+    sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
     framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
     if !strings.Contains(sshResult.Stderr, "command not found") {
         command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
@@ -134,6 +136,8 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
     } else {
         command = fmt.Sprintf("service kubelet %s", string(kOp))
     }
+
+    sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
     if sudoPresent {
         command = fmt.Sprintf("sudo %s", command)
     }
@@ -197,26 +201,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
 
 // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
 func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+    path := "/mnt/volume1"
+    byteLen := 64
+    seed := time.Now().UTC().UnixNano()
+
     ginkgo.By("Writing to the volume.")
-    file := "/mnt/_SUCCESS"
-    out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
-    e2elog.Logf(out)
-    framework.ExpectNoError(err)
+    CheckWriteToPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
 
     ginkgo.By("Restarting kubelet")
     KubeletCommand(KRestart, c, clientPod)
 
     ginkgo.By("Testing that written file is accessible.")
-    out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
-    e2elog.Logf(out)
-    framework.ExpectNoError(err)
-    e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
+    CheckReadFromPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+
+    e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
+}
+
+// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
+func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+    path := "/mnt/volume1"
+    byteLen := 64
+    seed := time.Now().UTC().UnixNano()
+
+    ginkgo.By("Writing to the volume.")
+    CheckWriteToPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+
+    ginkgo.By("Restarting kubelet")
+    KubeletCommand(KRestart, c, clientPod)
+
+    ginkgo.By("Testing that written pv is accessible.")
+    CheckReadFromPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+
+    e2elog.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }
 
 // TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 // forceDelete is true indicating whether the pod is forcefully deleted.
 func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
-    nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
+    nodeIP, err := framework.GetHostAddress(c, clientPod)
     framework.ExpectNoError(err)
     nodeIP = nodeIP + ":22"
 
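CheckWriteToPath and CheckReadFromPath replace the earlier touch/cat probe. From the call sites they appear to write byteLen pseudo-random bytes derived from seed and verify the same bytes after the kubelet restart, which works for both a mounted filesystem path and a raw block device (the v1.PersistentVolumeFilesystem/Block argument selects how the path is accessed). A rough standalone sketch of the seed-based write/verify idea; the real helpers live in test/e2e/storage/utils and differ in detail:

package main

import (
    "bytes"
    "fmt"
    "math/rand"
)

// dataFromSeed regenerates the same pseudo-random payload from (seed, n),
// so a checker only needs the seed and length to verify what was written.
func dataFromSeed(seed int64, n int) []byte {
    buf := make([]byte, n)
    rand.New(rand.NewSource(seed)).Read(buf)
    return buf
}

func main() {
    seed, byteLen := int64(42), 64
    written := dataFromSeed(seed, byteLen) // what the pod writes before the kubelet restart
    reread := dataFromSeed(seed, byteLen)  // what the checker expects to read back afterwards
    fmt.Println("payload verified:", bytes.Equal(written, reread))
}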
@@ -289,6 +311,76 @@ func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.F
     TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
 }
 
+// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
+// forceDelete is true indicating whether the pod is forcefully deleted.
+func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
+    nodeIP, err := framework.GetHostAddress(c, clientPod)
+    framework.ExpectNoError(err, "Failed to get nodeIP.")
+    nodeIP = nodeIP + ":22"
+
+    // Creating command to check whether path exists
+    command := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
+    if isSudoPresent(nodeIP, framework.TestContext.Provider) {
+        command = fmt.Sprintf("sudo sh -c \"%s\"", command)
+    }
+
+    ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
+    result, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
+    e2essh.LogResult(result)
+    framework.ExpectNoError(err, "Encountered SSH error.")
+    gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+    // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.
+
+    // This command is to make sure kubelet is started after test finishes no matter it fails or not.
+    defer func() {
+        KubeletCommand(KStart, c, clientPod)
+    }()
+    ginkgo.By("Stopping the kubelet.")
+    KubeletCommand(KStop, c, clientPod)
+
+    ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
+    if forceDelete {
+        err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
+    } else {
+        err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
+    }
+    framework.ExpectNoError(err, "Failed to delete pod.")
+
+    ginkgo.By("Starting the kubelet and waiting for pod to delete.")
+    KubeletCommand(KStart, c, clientPod)
+    err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
+    if err != nil {
+        framework.ExpectNoError(err, "Expected pod to be not found.")
+    }
+
+    if forceDelete {
+        // With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
+        // so wait some time to finish
+        time.Sleep(30 * time.Second)
+    }
+
+    ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
+    result, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
+    e2essh.LogResult(result)
+    framework.ExpectNoError(err, "Encountered SSH error.")
+    gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")
+
+    // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.
+
+    e2elog.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
+}
+
+// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
+func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+    TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
+}
+
+// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
+func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+    TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
+}
+
 // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
 func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
     pod := &v1.Pod{
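Background for the new unmap checks: for block-mode volumes the kubelet publishes the device into the pod as a symlink under /var/lib/kubelet/pods/<pod-UID>/volumeDevices/, so the test greps that directory over SSH before and after the kubelet outage; grep exiting 0 means a link is still present, and empty stdout afterwards means it was cleaned up. A small sketch of how such a probe command could be assembled (hypothetical UID, illustration only):

package main

import "fmt"

// buildDeviceProbe assembles a shell probe like the one above: grep '.' exits 0
// only if ls printed at least one line, i.e. a device symlink is still published.
func buildDeviceProbe(podUID string, sudo bool) string {
    cmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", podUID)
    if sudo {
        cmd = fmt.Sprintf("sudo sh -c %q", cmd)
    }
    return cmd
}

func main() {
    // The UID below is made up purely for illustration.
    fmt.Println(buildDeviceProbe("0fe3547a-0000-4000-8000-000000000000", true))
}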