Merge pull request #75071 from mkimuram/issue/74552e2e
Fix volume reconstruction and add e2e tests
This commit is contained in: commit 345e58b434
@@ -470,6 +470,11 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
    if err != nil {
        return nil, err
    }
    // TODO: remove feature gate check after no longer needed
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock && mapperPlugin == nil {
        return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
    }

    volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation(
        volume.volumeMode,
        plugin,
@@ -493,22 +498,48 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
        uniqueVolumeName = util.GetUniqueVolumeNameFromSpecWithPod(volume.podName, plugin, volumeSpec)
    }

    volumeMounter, newMounterErr := plugin.NewMounter(
        volumeSpec,
        pod,
        volumepkg.VolumeOptions{})
    if newMounterErr != nil {
        return nil, fmt.Errorf(
            "reconstructVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
            uniqueVolumeName,
            volumeSpec.Name(),
            volume.podName,
            pod.UID,
            newMounterErr)
    var volumeMapper volumepkg.BlockVolumeMapper
    var volumeMounter volumepkg.Mounter
    // Path to the mount or block device to check
    var checkPath string

    // TODO: remove feature gate check after no longer needed
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock {
        var newMapperErr error
        volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
            volumeSpec,
            pod,
            volumepkg.VolumeOptions{})
        if newMapperErr != nil {
            return nil, fmt.Errorf(
                "reconstructVolume.NewBlockVolumeMapper failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                uniqueVolumeName,
                volumeSpec.Name(),
                volume.podName,
                pod.UID,
                newMapperErr)
        }
        checkPath, _ = volumeMapper.GetPodDeviceMapPath()
    } else {
        var err error
        volumeMounter, err = plugin.NewMounter(
            volumeSpec,
            pod,
            volumepkg.VolumeOptions{})
        if err != nil {
            return nil, fmt.Errorf(
                "reconstructVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                uniqueVolumeName,
                volumeSpec.Name(),
                volume.podName,
                pod.UID,
                err)
        }
        checkPath = volumeMounter.GetPath()
    }

    // Check existence of mount point for filesystem volume or symbolic link for block volume
-   isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volumeMounter.GetPath(), volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
+   isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, checkPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
    if checkErr != nil {
        return nil, checkErr
    }
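In plain terms, the reconstruction fix picks which on-disk artifact to verify from the volume mode: block volumes are checked via the mapper's pod device map path, filesystem volumes via the mounter's mount path, and only that checkPath is handed to the existence check. A minimal, self-contained sketch of the branching, using stand-in types rather than the real kubelet interfaces (the paths below are illustrative):

    package main

    import "fmt"

    // Stand-in interfaces mirroring the two accessors used in the hunk above.
    type mounter interface{ GetPath() string }
    type blockMapper interface{ GetPodDeviceMapPath() (string, string) }

    type fakeMounter struct{}

    func (fakeMounter) GetPath() string {
        return "/var/lib/kubelet/pods/<uid>/volumes/<plugin>/<vol>" // illustrative path
    }

    type fakeMapper struct{}

    func (fakeMapper) GetPodDeviceMapPath() (string, string) {
        return "/var/lib/kubelet/pods/<uid>/volumeDevices/<plugin>", "<vol>" // illustrative path
    }

    // checkPathFor picks the path to verify, like the branch added to reconstructVolume.
    func checkPathFor(isBlock bool, m mounter, b blockMapper) string {
        if isBlock {
            path, _ := b.GetPodDeviceMapPath()
            return path
        }
        return m.GetPath()
    }

    func main() {
        fmt.Println(checkPathFor(true, fakeMounter{}, fakeMapper{}))  // block: device map path
        fmt.Println(checkPathFor(false, fakeMounter{}, fakeMapper{})) // filesystem: mount path
    }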
@@ -517,27 +548,6 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
        return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName)
    }

    // TODO: remove feature gate check after no longer needed
    var volumeMapper volumepkg.BlockVolumeMapper
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock {
        var newMapperErr error
        if mapperPlugin != nil {
            volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
                volumeSpec,
                pod,
                volumepkg.VolumeOptions{})
            if newMapperErr != nil {
                return nil, fmt.Errorf(
                    "reconstructVolume.NewBlockVolumeMapper failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    uniqueVolumeName,
                    volumeSpec.Name(),
                    volume.podName,
                    pod.UID,
                    newMapperErr)
            }
        }
    }

    reconstructedVolume := &reconstructedVolume{
        volumeName: uniqueVolumeName,
        podName:    volume.podName,
@@ -543,10 +543,12 @@ type MountedVolume struct {

    // Mounter is the volume mounter used to mount this volume. It is required
    // by kubelet to create container.VolumeMap.
    // Mounter is only required for file system volumes and not required for block volumes.
    Mounter volume.Mounter

    // BlockVolumeMapper is the volume mapper used to map this volume. It is required
    // by kubelet to create container.VolumeMap.
    // BlockVolumeMapper is only required for block volumes and not required for file system volumes.
    BlockVolumeMapper volume.BlockVolumeMapper

    // VolumeGidValue contains the value of the GID annotation, if present.
@@ -897,13 +899,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation(
    // Block Volume case
    // Create volumeSpec from mount path
    klog.V(5).Infof("Starting operationExecutor.ReconstructVolume")
    if mapperPlugin == nil {
        return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)",
            pluginName,
            volumeSpecName,
            podName,
            uid)
    }

    // volumePath contains volumeName on the path. In the case of block volume, {volumeName} is symbolic link
    // corresponding to raw block device.
    // ex. volumePath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
@@ -935,6 +931,9 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation(
    if attachable != nil {
        var isNotMount bool
        var mountCheckErr error
        if mounter == nil {
            return false, fmt.Errorf("mounter was not set for a filesystem volume")
        }
        if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil {
            return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v",
                uniqueVolumeName,
@@ -833,7 +833,7 @@ func makeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.Pers
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
- func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
+ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
    if len(command) == 0 {
        command = "trap exit TERM; while true; do sleep 1; done"
    }
@@ -874,17 +874,27 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
    }
    var volumeMounts = make([]v1.VolumeMount, 0)
    var volumeDevices = make([]v1.VolumeDevice, 0)
    var volumes = make([]v1.Volume, len(pvclaims))
    for index, pvclaim := range pvclaims {
        volumename := fmt.Sprintf("volume%v", index+1)
    var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
    volumeIndex := 0
    for _, pvclaim := range pvclaims {
        volumename := fmt.Sprintf("volume%v", volumeIndex+1)
        if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
            volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
        } else {
            volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
        }

-       volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+       volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
        volumeIndex++
    }
    for _, src := range inlineVolumeSources {
        volumename := fmt.Sprintf("volume%v", volumeIndex+1)
        // In-line volumes can be only filesystem, not block.
        volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
        volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
        volumeIndex++
    }

    podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
    podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
    podSpec.Spec.Volumes = volumes
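The new inlineVolumeSources parameter lets a single pod mix PVC-backed volumes with in-line (always filesystem) volumes. A hedged usage sketch of the updated signature; the emptyDir source and the makeMixedPod helper name are illustrative, not part of the change:

    package storage // illustrative placement; any e2e package importing the framework would do

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // makeMixedPod builds one pod spec with the given PVCs plus one in-line emptyDir volume.
    func makeMixedPod(ns string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
        inline := []*v1.VolumeSource{
            {EmptyDir: &v1.EmptyDirVolumeSource{}},
        }
        return framework.MakeSecPod(ns, pvclaims, inline,
            false,        // isPrivileged
            "",           // empty command falls back to the default sleep loop
            false, false, // hostIPC, hostPID
            framework.SELinuxLabel,
            nil) // fsGroup
    }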
@@ -933,13 +943,13 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
}

// CreateSecPod creates security pod with given claims
- func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
-     return CreateSecPodWithNodeSelection(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
+ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
+     return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
}

// CreateSecPodWithNodeSelection creates security pod with given claims
- func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
-     pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
+ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
+     pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
    // Setting node
    pod.Spec.NodeName = node.Name
    pod.Spec.NodeSelector = node.Selector
@@ -2670,6 +2670,36 @@ func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddr
    return
}

// GetHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func GetHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
    node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    // Try externalAddress first
    for _, address := range node.Status.Addresses {
        if address.Type == v1.NodeExternalIP {
            if address.Address != "" {
                return address.Address, nil
            }
        }
    }
    // If no externalAddress found, try internalAddress
    for _, address := range node.Status.Addresses {
        if address.Type == v1.NodeInternalIP {
            if address.Address != "" {
                return address.Address, nil
            }
        }
    }

    // If not found, return error
    return "", fmt.Errorf("No address for pod %v on node %v",
        p.Name, p.Spec.NodeName)
}

type extractRT struct {
    http.Header
}
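Unlike GetHostExternalAddress above it, the new helper falls back to the node's InternalIP, so the kubelet utilities later in this diff can reach nodes that expose no external address. A small hedged sketch of a caller; sshTargetForPod is a hypothetical helper name, but the ":22" suffix mirrors how KubeletCommand consumes the result below:

    package storageutils // illustrative placement

    import (
        v1 "k8s.io/api/core/v1"
        clientset "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // sshTargetForPod resolves the address of the node running the pod and appends the SSH port.
    func sshTargetForPod(c clientset.Interface, pod *v1.Pod) (string, error) {
        nodeIP, err := framework.GetHostAddress(c, pod)
        if err != nil {
            return "", err
        }
        return nodeIP + ":22", nil
    }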
@@ -49,6 +49,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
    testsuites.InitProvisioningTestSuite,
    testsuites.InitSnapshottableTestSuite,
    testsuites.InitMultiVolumeTestSuite,
    testsuites.InitDisruptiveTestSuite,
}

// This executes testSuites for csi volumes.
test/e2e/storage/external/external.go (vendored, 1 change)
@@ -49,6 +49,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
    testsuites.InitVolumeModeTestSuite,
    testsuites.InitVolumesTestSuite,
    testsuites.InitVolumeExpandTestSuite,
    testsuites.InitDisruptiveTestSuite,
}

func init() {
@@ -102,7 +102,9 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
    framework.ExpectEqual(len(pvs), 1)

    ginkgo.By("Creating a pod with dynamically provisioned volume")
-   pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
+   pod, err := framework.CreateSecPod(c, ns, pvcClaims, nil,
+       false, "", false, false, framework.SELinuxLabel,
+       nil, framework.PodStartTimeout)
    framework.ExpectNoError(err, "While creating pods for kubelet restart test")
    return pod, pvc, pvs[0]
}
@@ -57,6 +57,7 @@ var testSuites = []func() testsuites.TestSuite{
    testsuites.InitProvisioningTestSuite,
    testsuites.InitMultiVolumeTestSuite,
    testsuites.InitVolumeExpandTestSuite,
    testsuites.InitDisruptiveTestSuite,
}

// This executes testSuites for in-tree volumes.
@@ -555,7 +555,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
        pvcs = append(pvcs, pvc)
    }

-   pod := framework.MakeSecPod(config.ns, pvcs, false, "sleep 1", false, false, selinuxLabel, nil)
+   pod := framework.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil)
    pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
    framework.ExpectNoError(err)
    pods[pod.Name] = pod
@@ -648,7 +648,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
    framework.ExpectNoError(err)
    ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
    for i := 0; i < count; i++ {
-       pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, selinuxLabel, nil)
+       pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil)
        pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
        framework.ExpectNoError(err)
        pods[pod.Name] = pod
@@ -939,7 +939,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
}

func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
+   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
    if pod == nil {
        return
    }
@@ -965,7 +965,7 @@ func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolu
}

func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
+   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
    if pod == nil {
        return
    }
@@ -977,7 +977,7 @@ func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolu
}

func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
+   pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
    if pod == nil {
        return
    }
@@ -987,7 +987,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,

func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
    ginkgo.By("Creating a pod")
-   return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
+   return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
}

func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "base.go",
        "disruptive.go",
        "driveroperations.go",
        "ephemeral.go",
        "multivolume.go",
161
test/e2e/storage/testsuites/disruptive.go
Normal file
161
test/e2e/storage/testsuites/disruptive.go
Normal file
@ -0,0 +1,161 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type disruptiveTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
}
|
||||
|
||||
var _ TestSuite = &disruptiveTestSuite{}
|
||||
|
||||
// InitDisruptiveTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
func InitDisruptiveTestSuite() TestSuite {
|
||||
return &disruptiveTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
name: "disruptive",
|
||||
featureTag: "[Disruptive]",
|
||||
testPatterns: []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.FsVolModePreprovisionedPV,
|
||||
testpatterns.FsVolModeDynamicPV,
|
||||
testpatterns.BlockVolModePreprovisionedPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) getTestSuiteInfo() TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
testCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
|
||||
// genericVolumeTestResource contains pv, pvc, sc, etc., owns cleaning that up
|
||||
resource *genericVolumeTestResource
|
||||
pod *v1.Pod
|
||||
}
|
||||
var l local
|
||||
|
||||
// No preconditions to test. Normally they would be in a BeforeEach here.
|
||||
|
||||
// This intentionally comes after checking the preconditions because it
|
||||
// registers its own BeforeEach which creates the namespace. Beware that it
|
||||
// also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewDefaultFramework("disruptive")
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
l.ns = f.Namespace
|
||||
l.cs = f.ClientSet
|
||||
|
||||
// Now do the more expensive test initialization.
|
||||
l.config, l.testCleanup = driver.PrepareTest(f)
|
||||
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !driver.GetDriverInfo().Capabilities[CapBlock] {
|
||||
framework.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode)
|
||||
}
|
||||
|
||||
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
if l.pod != nil {
|
||||
ginkgo.By("Deleting pod")
|
||||
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
|
||||
framework.ExpectNoError(err, "while deleting pod")
|
||||
l.pod = nil
|
||||
}
|
||||
|
||||
if l.resource != nil {
|
||||
l.resource.cleanupResource()
|
||||
l.resource = nil
|
||||
}
|
||||
|
||||
if l.testCleanup != nil {
|
||||
l.testCleanup()
|
||||
l.testCleanup = nil
|
||||
}
|
||||
}
|
||||
|
||||
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
|
||||
type disruptiveTest struct {
|
||||
testItStmt string
|
||||
runTestFile testBody
|
||||
runTestBlock testBody
|
||||
}
|
||||
disruptiveTestTable := []disruptiveTest{
|
||||
{
|
||||
testItStmt: "Should test that pv written before kubelet restart is readable after restart.",
|
||||
runTestFile: utils.TestKubeletRestartsAndRestoresMount,
|
||||
runTestBlock: utils.TestKubeletRestartsAndRestoresMap,
|
||||
},
|
||||
{
|
||||
testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.",
|
||||
runTestFile: utils.TestVolumeUnmountsFromDeletedPod,
|
||||
runTestBlock: utils.TestVolumeUnmapsFromDeletedPod,
|
||||
},
|
||||
{
|
||||
testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.",
|
||||
runTestFile: utils.TestVolumeUnmountsFromForceDeletedPod,
|
||||
runTestBlock: utils.TestVolumeUnmapsFromForceDeletedPod,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range disruptiveTestTable {
|
||||
func(t disruptiveTest) {
|
||||
ginkgo.It(t.testItStmt, func() {
|
||||
init()
|
||||
defer cleanup()
|
||||
|
||||
var err error
|
||||
var pvcs []*v1.PersistentVolumeClaim
|
||||
var inlineSources []*v1.VolumeSource
|
||||
if pattern.VolType == testpatterns.InlineVolume {
|
||||
inlineSources = append(inlineSources, l.resource.volSource)
|
||||
} else {
|
||||
pvcs = append(pvcs, l.resource.pvc)
|
||||
}
|
||||
ginkgo.By("Creating a pod with pvc")
|
||||
l.pod, err = framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
|
||||
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
|
||||
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
t.runTestBlock(l.cs, l.config.Framework, l.pod)
|
||||
} else {
|
||||
t.runTestFile(l.cs, l.config.Framework, l.pod)
|
||||
}
|
||||
})
|
||||
}(test)
|
||||
}
|
||||
}
|
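The suite is table-driven: each case carries one body for filesystem volumes and one for block volumes, and the It block picks between them from the pattern's volume mode. A stand-alone sketch of that dispatch pattern, using stand-in types rather than the framework's:

    package main

    import "fmt"

    type testBody func(target string)

    type disruptiveCase struct {
        name         string
        runTestFile  testBody
        runTestBlock testBody
    }

    func main() {
        cases := []disruptiveCase{
            {
                name:         "data written before a restart stays readable",
                runTestFile:  func(t string) { fmt.Println("check mount point at", t) },
                runTestBlock: func(t string) { fmt.Println("check device symlink for", t) },
            },
        }
        isBlock := true // in the real suite this comes from pattern.VolMode
        for _, c := range cases {
            if isBlock {
                c.runTestBlock("/mnt/volume1")
            } else {
                c.runTestFile("/mnt/volume1")
            }
        }
    }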
@@ -326,7 +326,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
    node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
    ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
-   pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs,
+   pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
        false, "", false, false, framework.SELinuxLabel,
        nil, node, framework.PodStartTimeout)
    defer func() {
@@ -400,7 +400,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
        index := i + 1
        ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
        pod, err := framework.CreateSecPodWithNodeSelection(cs, ns,
-           []*v1.PersistentVolumeClaim{pvc},
+           []*v1.PersistentVolumeClaim{pvc}, nil,
            false, "", false, false, framework.SELinuxLabel,
            nil, node, framework.PodStartTimeout)
        defer func() {
@@ -153,7 +153,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte

        var err error
        ginkgo.By("Creating a pod with dynamically provisioned volume")
-       l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+       l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
        defer func() {
            err = framework.DeletePodWithWait(f, f.ClientSet, l.pod)
            framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -197,7 +197,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
        }

        ginkgo.By("Creating a new pod with same volume")
-       l.pod2, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+       l.pod2, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
        defer func() {
            err = framework.DeletePodWithWait(f, f.ClientSet, l.pod2)
            framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -218,7 +218,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte

        var err error
        ginkgo.By("Creating a pod with dynamically provisioned volume")
-       l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+       l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
        defer func() {
            err = framework.DeletePodWithWait(f, f.ClientSet, l.pod)
            framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -189,7 +189,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern

        ginkgo.By("Creating pod")
        pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
-           false, "", false, false, framework.SELinuxLabel,
+           nil, false, "", false, false, framework.SELinuxLabel,
            nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
        defer func() {
            framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
@@ -102,6 +102,16 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
    framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
}

func isSudoPresent(nodeIP string, provider string) bool {
    e2elog.Logf("Checking if sudo command is present")
    sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
    framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
    if !strings.Contains(sshResult.Stderr, "command not found") {
        return true
    }
    return false
}

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired statues..
// - First issues the command via `systemctl`
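The probe is factored out so every remote command can be wrapped the same way; a short hedged sketch of that wrapping (maybeSudo is a hypothetical helper, but the pattern matches the call sites below):

    // maybeSudo prefixes a remote command with sudo only when the node has sudo available.
    func maybeSudo(cmd, nodeIP, provider string) string {
        if isSudoPresent(nodeIP, provider) {
            return "sudo " + cmd
        }
        return cmd
    }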
@@ -110,23 +120,15 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
    command := ""
    sudoPresent := false
    systemctlPresent := false
    kubeletPid := ""

-   nodeIP, err := framework.GetHostExternalAddress(c, pod)
+   nodeIP, err := framework.GetHostAddress(c, pod)
    framework.ExpectNoError(err)
    nodeIP = nodeIP + ":22"

    e2elog.Logf("Checking if sudo command is present")
    sshResult, err := e2essh.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
    framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
    if !strings.Contains(sshResult.Stderr, "command not found") {
        sudoPresent = true
    }

    e2elog.Logf("Checking if systemctl command is present")
-   sshResult, err = e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
+   sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
    framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
    if !strings.Contains(sshResult.Stderr, "command not found") {
        command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
@@ -134,6 +136,8 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
    } else {
        command = fmt.Sprintf("service kubelet %s", string(kOp))
    }

    sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
    if sudoPresent {
        command = fmt.Sprintf("sudo %s", command)
    }
@@ -197,26 +201,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
    path := "/mnt/volume1"
    byteLen := 64
    seed := time.Now().UTC().UnixNano()

    ginkgo.By("Writing to the volume.")
    file := "/mnt/_SUCCESS"
    out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
    e2elog.Logf(out)
    framework.ExpectNoError(err)
    CheckWriteToPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

    ginkgo.By("Restarting kubelet")
    KubeletCommand(KRestart, c, clientPod)

    ginkgo.By("Testing that written file is accessible.")
    out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
    e2elog.Logf(out)
    framework.ExpectNoError(err)
    e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
    CheckReadFromPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

    e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
}

// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
    path := "/mnt/volume1"
    byteLen := 64
    seed := time.Now().UTC().UnixNano()

    ginkgo.By("Writing to the volume.")
    CheckWriteToPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

    ginkgo.By("Restarting kubelet")
    KubeletCommand(KRestart, c, clientPod)

    ginkgo.By("Testing that written pv is accessible.")
    CheckReadFromPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

    e2elog.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
}

// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
-   nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
+   nodeIP, err := framework.GetHostAddress(c, clientPod)
    framework.ExpectNoError(err)
    nodeIP = nodeIP + ":22"
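The rewritten restart tests verify data by regenerating the expected payload from the recorded byteLen and seed instead of keeping a marker file around. A self-contained sketch of that idea (a stand-in for CheckWriteToPath/CheckReadFromPath, whose internals are not shown in this diff):

    package main

    import (
        "bytes"
        "fmt"
        "math/rand"
    )

    // genBytes deterministically regenerates the test payload from its seed.
    func genBytes(byteLen int, seed int64) []byte {
        r := rand.New(rand.NewSource(seed))
        b := make([]byte, byteLen)
        r.Read(b)
        return b
    }

    func main() {
        byteLen, seed := 64, int64(42)
        written := genBytes(byteLen, seed)  // what the pod wrote before the kubelet restart
        expected := genBytes(byteLen, seed) // regenerated after the restart for comparison
        fmt.Println("payload matches after restart:", bytes.Equal(written, expected))
    }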
@@ -289,6 +311,74 @@ func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.F
    TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}

// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
    nodeIP, err := framework.GetHostAddress(c, clientPod)
    framework.ExpectNoError(err, "Failed to get nodeIP.")
    nodeIP = nodeIP + ":22"

    // Creating command to check whether path exists
    command := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
    if isSudoPresent(nodeIP, framework.TestContext.Provider) {
        command = fmt.Sprintf("sudo sh -c \"%s\"", command)
    }

    ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
    result, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
    e2essh.LogResult(result)
    framework.ExpectNoError(err, "Encountered SSH error.")
    framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

    // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.

    // This command is to make sure kubelet is started after test finishes no matter it fails or not.
    defer func() {
        KubeletCommand(KStart, c, clientPod)
    }()
    ginkgo.By("Stopping the kubelet.")
    KubeletCommand(KStop, c, clientPod)

    ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
    if forceDelete {
        err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
    } else {
        err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
    }
    framework.ExpectNoError(err, "Failed to delete pod.")

    ginkgo.By("Starting the kubelet and waiting for pod to delete.")
    KubeletCommand(KStart, c, clientPod)
    err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
    framework.ExpectNoError(err, "Expected pod to be not found.")

    if forceDelete {
        // With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
        // so wait some time to finish
        time.Sleep(30 * time.Second)
    }

    ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
    result, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
    e2essh.LogResult(result)
    framework.ExpectNoError(err, "Encountered SSH error.")
    gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")

    // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.

    e2elog.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
}

// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
    TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
}

// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
    TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
}

// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
    pod := &v1.Pod{
@@ -93,7 +93,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
    framework.ExpectNoError(err)

    ginkgo.By("Consuming the PVC before downgrade")
-   t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
+   t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
    framework.ExpectNoError(err)

    ginkgo.By("Checking if PV exists as expected volume mode")