Merge pull request #89629 from fatedier/subpath

Modified subpath configmap mount fails when container restarts
Commit d9b084a9d1 by Kubernetes Prow Robot, 2020-07-09 09:06:32 -07:00, committed by GitHub
7 changed files with 151 additions and 216 deletions

@@ -158,6 +158,11 @@ func (hu *HostUtil) EvalHostSymlinks(pathname string) (string, error) {
return filepath.EvalSymlinks(pathname)
}
// FindMountInfo returns the mount info for the given path.
func (hu *HostUtil) FindMountInfo(path string) (mount.MountInfo, error) {
return findMountInfo(path, procMountInfoPath)
}
// isShared returns true if the given path is on a mount point that has shared
// mount propagation.
func isShared(mount string, mountInfoPath string) (bool, error) {

@@ -12,6 +12,7 @@ go_library(
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//pkg/volume/util/hostutil:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/mount:go_default_library",
@@ -33,6 +34,7 @@ go_library(
"//vendor/k8s.io/utils/nsenter:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/volume/util/hostutil:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/mount:go_default_library",

@@ -29,6 +29,7 @@ import (
"golang.org/x/sys/unix"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/utils/mount"
)
@@ -108,9 +109,21 @@ func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, strin
notMount = true
}
if !notMount {
// It's already mounted
klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget)
return true, bindPathTarget, nil
linuxHostUtil := hostutil.NewHostUtil()
mntInfo, err := linuxHostUtil.FindMountInfo(bindPathTarget)
if err != nil {
return false, "", fmt.Errorf("error calling findMountInfo for %s: %s", bindPathTarget, err)
}
if mntInfo.Root != subpath.Path {
// It's already mounted but not what we want, unmount it
if err = mounter.Unmount(bindPathTarget); err != nil {
return false, "", fmt.Errorf("error ummounting %s: %s", bindPathTarget, err)
}
} else {
// It's already mounted
klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget)
return true, bindPathTarget, nil
}
}
// bindPathTarget is in /var/lib/kubelet and thus reachable without any
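
Why the existing bind mount can go stale: kubelet's atomic writer materializes a configmap volume into a fresh timestamped directory on every update, while the per-container subpath bind mount keeps pointing at the old directory. Comparing the mount's recorded root against the desired subpath.Path catches that drift, and unmounting lets the code below re-create the bind mount against the current directory. Below is a minimal sketch of that check (not part of the diff), assuming the FindMountInfo helper added in the first hunk and the Root field that k8s.io/utils/mount's MountInfo exposes; the paths in main are hypothetical.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/hostutil"
)

// isStaleBindMount reports whether the mount at target no longer exposes
// wantRoot, e.g. because the configmap's atomic-writer directory was swapped
// on update while the old bind mount stayed in place.
func isStaleBindMount(target, wantRoot string) (bool, error) {
	hu := hostutil.NewHostUtil()
	info, err := hu.FindMountInfo(target)
	if err != nil {
		return false, fmt.Errorf("finding mount info for %s: %w", target, err)
	}
	return info.Root != wantRoot, nil
}

func main() {
	// Hypothetical kubelet paths, for illustration only.
	stale, err := isStaleBindMount(
		"/var/lib/kubelet/pods/<uid>/volume-subpaths/vol/container/0",
		"/var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~configmap/vol/key")
	fmt.Println(stale, err)
}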

@@ -431,14 +431,6 @@
environment variables when backticks are supplied.
release: v1.19
file: test/e2e/common/expansion.go
- testname: VolumeSubpathEnvExpansion, subpath lifecycle
codename: '[k8s.io] Variable Expansion should not change the subpath mount on a
container restart if the environment variable changes [sig-storage][Slow] [Conformance]'
description: "Verify should not change the subpath mount on a container restart
if the environment variable changes 1.\tvalid subpathexpr starts a container running
2.\ttest for valid subpath writes 3.\tcontainer restarts 4.\tdelete cleanly"
release: v1.19
file: test/e2e/common/expansion.go
- testname: VolumeSubpathEnvExpansion, subpath test writes
codename: '[k8s.io] Variable Expansion should succeed in writing subpaths in container
[sig-storage][Slow] [Conformance]'

@@ -17,14 +17,9 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -362,110 +357,6 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
/*
Release : v1.19
Testname: VolumeSubpathEnvExpansion, subpath lifecycle
Description: Verify should not change the subpath mount on a container restart if the environment variable changes
1. valid subpathexpr starts a container running
2. test for valid subpath writes
3. container restarts
4. delete cleanly
*/
framework.ConformanceIt("should not change the subpath mount on a container restart if the environment variable changes [sig-storage][Slow]", func() {
envVars := []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations['mysubpath']",
},
},
},
}
mounts := []v1.VolumeMount{
{
Name: "workdir1",
MountPath: "/subpath_mount",
},
{
Name: "workdir1",
MountPath: "/volume_mount",
},
}
subpathMounts := []v1.VolumeMount{
{
Name: "workdir1",
MountPath: "/subpath_mount",
SubPathExpr: "$(POD_NAME)",
},
{
Name: "workdir1",
MountPath: "/volume_mount",
},
}
volumes := []v1.Volume{
{
Name: "workdir1",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
}
pod := newPod([]string{"/bin/sh", "-ec", "sleep 100000"}, envVars, subpathMounts, volumes)
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "foo"}
sideContainerName := "side-container"
pod.Spec.Containers = append(pod.Spec.Containers, newContainer(sideContainerName, []string{"/bin/sh", "-ec", "sleep 100000"}, envVars, subpathMounts))
suffix := string(uuid.NewUUID())
pod.Spec.InitContainers = []v1.Container{newContainer(
fmt.Sprintf("init-volume-%s", suffix), []string{"sh", "-c", "mkdir -p /volume_mount/foo; touch /volume_mount/foo/test.log"}, nil, mounts)}
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/subpath_mount/test.log"},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
// Start pod
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
var podClient *framework.PodClient = f.PodClient()
pod = podClient.Create(pod)
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("updating the pod")
podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "newsubpath"}
})
ginkgo.By("waiting for pod and container restart")
waitForPodContainerRestart(f, pod, "/volume_mount/foo/test.log")
ginkgo.By("test for subpath mounted with old value")
cmd := "test -f /volume_mount/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify old file exists")
}
cmd = "test ! -f /volume_mount/newsubpath/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify new file does not exist")
}
})
})
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
@@ -480,72 +371,6 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
framework.ExpectError(err, "while waiting for pod to be running")
}
// Tests that the existing subpath mount is detected when a container restarts
func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount string) {
ginkgo.By("Failing liveness probe")
stdout, stderr, err := f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("rm %v", volumeMount))
framework.Logf("Pod exec output: %v / %v", stdout, stderr)
framework.ExpectNoError(err, "while failing liveness probe")
// Check that container has restarted
ginkgo.By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
}
return false, nil
})
framework.ExpectNoError(err, "while waiting for container to restart")
// Fix liveness probe
ginkgo.By("Rewriting the file")
stdout = f.ExecShellInContainer(pod.Name, pod.Spec.Containers[1].Name, fmt.Sprintf("echo test-after > %v", volumeMount))
framework.Logf("Pod exec output: %v", stdout)
// Wait for container restarts to stabilize
ginkgo.By("Waiting for container to stop restarting")
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
framework.Logf("Container has restart count: %v", restarts)
}
break
}
}
return false, nil
})
framework.ExpectNoError(err, "while waiting for container to stabilize")
}
func newPod(command []string, envVars []v1.EnvVar, mounts []v1.VolumeMount, volumes []v1.Volume) *v1.Pod {
podName := "var-expansion-" + string(uuid.NewUUID())
return &v1.Pod{

@@ -18,14 +18,14 @@ package storage
import (
"context"
"k8s.io/api/core/v1"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
)
var _ = utils.SIGDescribe("Subpath", func() {
@@ -48,7 +48,6 @@ var _ = utils.SIGDescribe("Subpath", func() {
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.ExpectNoError(err, "while creating configmap")
}
})
/*
@@ -125,5 +124,14 @@
}, privilegedSecurityContext)
testsuites.TestBasicSubpath(f, "configmap-value", pod)
})
})
ginkgo.Context("Container restart", func() {
ginkgo.It("should verify that container can restart successfully after configmaps modified", func() {
configmapToModify := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-value"}}
configmapModified := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-modified-value"}}
testsuites.TestPodContainerRestartWithConfigmapModified(f, configmapToModify, configmapModified)
})
})
})
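
Note that both ConfigMap objects above deliberately share the name my-configmap-to-modify: TestPodContainerRestartWithConfigmapModified creates the first to seed the volume and later passes the second to Update, so only the mounted value changes mid-test.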

@@ -28,6 +28,7 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
@@ -798,25 +799,41 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
return nil
}
// Tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
type podContainerRestartHooks struct {
AddLivenessProbeFunc func(pod *v1.Pod, probeFilePath string)
FailLivenessProbeFunc func(pod *v1.Pod, probeFilePath string)
FixLivenessProbeFunc func(pod *v1.Pod, probeFilePath string)
}
func (h *podContainerRestartHooks) AddLivenessProbe(pod *v1.Pod, probeFilePath string) {
if h.AddLivenessProbeFunc != nil {
h.AddLivenessProbeFunc(pod, probeFilePath)
}
}
func (h *podContainerRestartHooks) FailLivenessProbe(pod *v1.Pod, probeFilePath string) {
if h.FailLivenessProbeFunc != nil {
h.FailLivenessProbeFunc(pod, probeFilePath)
}
}
func (h *podContainerRestartHooks) FixLivenessProbe(pod *v1.Pod, probeFilePath string) {
if h.FixLivenessProbeFunc != nil {
h.FixLivenessProbeFunc(pod, probeFilePath)
}
}
// testPodContainerRestartWithHooks tests that a container restarts and then stabilizes.
// The hooks wrap the steps performed between container restarts.
func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", probeFilePath},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
hooks.AddLivenessProbe(pod, probeFilePath)
// Start pod
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
@@ -830,9 +847,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
framework.ExpectNoError(err, "while failing liveness probe")
hooks.FailLivenessProbe(pod, probeFilePath)
// Check that container has restarted
ginkgo.By("Waiting for container to restart")
@@ -857,16 +872,8 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(err, "while waiting for container to restart")
// Fix liveness probe
ginkgo.By("Rewriting the file")
var writeCmd string
if framework.NodeOSDistroIs("windows") {
writeCmd = fmt.Sprintf("echo test-after | Out-File -FilePath %v", probeFilePath)
} else {
writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
}
out, err = podContainerExec(pod, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
framework.ExpectNoError(err, "while rewriting the probe file")
ginkgo.By("Fix liveness probe")
hooks.FixLivenessProbe(pod, probeFilePath)
// Wait for container restarts to stabilize
ginkgo.By("Waiting for container to stop restarting")
@@ -898,6 +905,89 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(err, "while waiting for container to stabilize")
}
// testPodContainerRestart tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
testPodContainerRestartWithHooks(f, pod, &podContainerRestartHooks{
AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
p.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", probeFilePath},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
},
FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
framework.ExpectNoError(err, "while failing liveness probe")
},
FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
ginkgo.By("Rewriting the file")
var writeCmd string
if framework.NodeOSDistroIs("windows") {
writeCmd = fmt.Sprintf("echo test-after | Out-File -FilePath %v", probeFilePath)
} else {
writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
}
out, err := podContainerExec(p, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
framework.ExpectNoError(err, "while rewriting the probe file")
},
})
}
// TestPodContainerRestartWithConfigmapModified tests that a container can restart and stabilize after the configmap it mounts has been modified.
// 1. a valid container starts running
// 2. the configmap is updated
// 3. the container restarts
// 4. the container becomes stable once the configmap-mounted file has been modified
func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, original, modified *v1.ConfigMap) {
ginkgo.By("Create configmap")
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), original, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.ExpectNoError(err, "while creating configmap to modify")
}
var subpath string
for k := range original.Data {
subpath = k
break
}
pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, false)
pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))
modifiedValue := modified.Data[subpath]
testPodContainerRestartWithHooks(f, pod, &podContainerRestartHooks{
AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
p.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
// Expect the probe file to exist, or the configmap-mounted file to have been modified.
Command: []string{"sh", "-c", fmt.Sprintf("cat %s || test `cat %s` = '%s'", probeFilePath, volumePath, modifiedValue)},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
},
FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
framework.ExpectNoError(err, "while failing liveness probe")
},
FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), modified, metav1.UpdateOptions{})
framework.ExpectNoError(err, "while fixing liveness probe")
},
})
}
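
The liveness probe built above encodes the whole scenario: it passes while the probe file exists, and once FailLivenessProbeFunc deletes that file it keeps failing, restarting the container, until the remounted subpath exposes the updated configmap value. A standalone sketch of the command it assembles, with hypothetical stand-ins for the suite's probeFilePath, volumePath, and modified data:

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for the suite's constants.
	probeFilePath := "/probe-volume/probe-file"
	volumePath := "/test-volume"
	modifiedValue := "configmap-modified-value"

	// Succeeds if the probe file still exists, or if the subpath-mounted
	// configmap file already shows the updated value.
	cmd := fmt.Sprintf("cat %s || test `cat %s` = '%s'", probeFilePath, volumePath, modifiedValue)
	fmt.Println(cmd)
}
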
func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()