From ab4389f174c11f0c7d08fb26e8c4ac0707118f65 Mon Sep 17 00:00:00 2001 From: carlory Date: Fri, 21 Feb 2025 15:07:13 +0800 Subject: [PATCH 1/3] add e2e test to reproduce unexpected unmount after kubelet is restarted Signed-off-by: carlory --- .../storage/csimock/csi_kubelet_restart.go | 122 ++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 test/e2e/storage/csimock/csi_kubelet_restart.go diff --git a/test/e2e/storage/csimock/csi_kubelet_restart.go b/test/e2e/storage/csimock/csi_kubelet_restart.go new file mode 100644 index 00000000000..2c98fdd9376 --- /dev/null +++ b/test/e2e/storage/csimock/csi_kubelet_restart.go @@ -0,0 +1,122 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csimock + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" + "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/storage/utils" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framework.WithSerial(), framework.WithDisruptive(), func() { + f := framework.NewDefaultFramework("csi-mock-when-kubelet-restart") + f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged + m := newMockDriverSetup(f) + + ginkgo.It("should not umount volume when the pvc is terminating but still used by a running pod", func(ctx context.Context) { + m.init(ctx, testParameters{ + registerDriver: true, + }) + ginkgo.DeferCleanup(m.cleanup) + + ginkgo.By("Creating a Pod with a PVC backed by a CSI volume") + _, pvc, pod := m.createPod(ctx, pvcReference) + + ginkgo.By("Waiting for the Pod to be running") + err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) + framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) + + ginkgo.By("Deleting the PVC") + err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name) + + ginkgo.By("Restarting kubelet") + err = stopKindKubelet(ctx) + framework.ExpectNoError(err, "failed to stop kubelet") + err = startKindKubelet(ctx) + framework.ExpectNoError(err, "failed to start kubelet") + + ginkgo.By("Verifying the PVC is terminating during kubelet restart") + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get PVC %s", pvc.Name) + gomega.Expect(pvc.DeletionTimestamp).NotTo(gomega.BeNil(), "PVC %s should have deletion timestamp", pvc.Name) + + // FIXME: the expected behavior is no NodeUnpublishVolume call is made during kubelet restart + ginkgo.By(fmt.Sprintf("Verifying that the driver received NodeUnpublishVolume call for PVC %s", pvc.Name)) + gomega.Eventually(ctx, m.driver.GetCalls). 
+ WithPolling(framework.Poll). + WithTimeout(framework.RestartNodeReadyAgainTimeout). + Should(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("NodeUnpublishVolume")))) + + // ginkgo.By(fmt.Sprintf("Verifying that the driver didn't receive NodeUnpublishVolume call for PVC %s", pvc.Name)) + // gomega.Consistently(ctx, m.driver.GetCalls). + // WithPolling(framework.Poll). + // WithTimeout(framework.ClaimProvisionShortTimeout). + // ShouldNot(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("NodeUnpublishVolume")))) + + // ginkgo.By("Verifying the Pod is still running") + // err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) + // framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) + }) +}) + +func stopKindKubelet(ctx context.Context) error { + return kubeletExec("systemctl", "stop", "kubelet") +} + +func startKindKubelet(ctx context.Context) error { + return kubeletExec("systemctl", "start", "kubelet") +} + +// Run a command in container with kubelet (and the whole control plane as containers) +func kubeletExec(command ...string) error { + containerName := getKindContainerName() + args := []string{"exec", containerName} + args = append(args, command...) + cmd := exec.Command("docker", args...) + + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command %q failed: %v\noutput:%s", prettyCmd(cmd), err, string(out)) + } + + framework.Logf("command %q succeeded:\n%s", prettyCmd(cmd), string(out)) + return nil +} + +func getKindContainerName() string { + clusterName := os.Getenv("KIND_CLUSTER_NAME") + if clusterName == "" { + clusterName = "kind" + } + return clusterName + "-control-plane" +} + +func prettyCmd(cmd *exec.Cmd) string { + return fmt.Sprintf("%s %s", cmd.Path, strings.Join(cmd.Args, " ")) +} From 77de0b99c2b3c674c85b6d27beb37e847e6ee166 Mon Sep 17 00:00:00 2001 From: carlory Date: Fri, 21 Feb 2025 16:00:20 +0800 Subject: [PATCH 2/3] fix e2e Signed-off-by: carlory --- .../storage/csimock/csi_kubelet_restart.go | 34 +++++++++++-------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/test/e2e/storage/csimock/csi_kubelet_restart.go b/test/e2e/storage/csimock/csi_kubelet_restart.go index 2c98fdd9376..9d6ffdb3512 100644 --- a/test/e2e/storage/csimock/csi_kubelet_restart.go +++ b/test/e2e/storage/csimock/csi_kubelet_restart.go @@ -25,6 +25,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" @@ -66,22 +67,27 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo framework.ExpectNoError(err, "failed to get PVC %s", pvc.Name) gomega.Expect(pvc.DeletionTimestamp).NotTo(gomega.BeNil(), "PVC %s should have deletion timestamp", pvc.Name) - // FIXME: the expected behavior is no NodeUnpublishVolume call is made during kubelet restart - ginkgo.By(fmt.Sprintf("Verifying that the driver received NodeUnpublishVolume call for PVC %s", pvc.Name)) - gomega.Eventually(ctx, m.driver.GetCalls). 
+ ginkgo.By(fmt.Sprintf("Verifying that the driver didn't receive NodeUnpublishVolume call for PVC %s", pvc.Name)) + gomega.Consistently(ctx, + func(ctx context.Context) interface{} { + calls, err := m.driver.GetCalls(ctx) + if err != nil { + if apierrors.IsUnexpectedServerError(err) { + // kubelet might not be ready yet when getting the calls + gomega.TryAgainAfter(framework.Poll).Wrap(err).Now() + return nil + } + return nil + } + return calls + }). WithPolling(framework.Poll). - WithTimeout(framework.RestartNodeReadyAgainTimeout). - Should(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("NodeUnpublishVolume")))) + WithTimeout(framework.ClaimProvisionShortTimeout). + ShouldNot(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("NodeUnpublishVolume")))) - // ginkgo.By(fmt.Sprintf("Verifying that the driver didn't receive NodeUnpublishVolume call for PVC %s", pvc.Name)) - // gomega.Consistently(ctx, m.driver.GetCalls). - // WithPolling(framework.Poll). - // WithTimeout(framework.ClaimProvisionShortTimeout). - // ShouldNot(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("NodeUnpublishVolume")))) - - // ginkgo.By("Verifying the Pod is still running") - // err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) - // framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) + ginkgo.By("Verifying the Pod is still running") + err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) + framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) }) }) From 3836d58744870248a74130ecf41e67cc201f2d48 Mon Sep 17 00:00:00 2001 From: carlory Date: Fri, 21 Feb 2025 18:36:06 +0800 Subject: [PATCH 3/3] fix handle terminating pvc when kubelet rebuild dsw Signed-off-by: carlory --- .../desired_state_of_world_populator.go | 19 ++++-- .../storage/csimock/csi_kubelet_restart.go | 64 ++++++------------- 2 files changed, 31 insertions(+), 52 deletions(-) diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 2d9b07a6021..ff47f777b3b 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -24,6 +24,7 @@ import ( "context" "errors" "fmt" + "slices" "sync" "time" @@ -558,15 +559,21 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err) } - // Pods that uses a PVC that is being deleted must not be started. + // Pods that uses a PVC that is being deleted and not protected by + // kubernetes.io/pvc-protection must not be started. // - // In case an old kubelet is running without this check or some kubelets - // have this feature disabled, the worst that can happen is that such - // pod is scheduled. This was the default behavior in 1.8 and earlier - // and users should not be that surprised. + // 1) In case an old kubelet is running without this check, the worst + // that can happen is that such pod is scheduled. This was the default + // behavior in 1.8 and earlier and users should not be that surprised. // It should happen only in very rare case when scheduler schedules // a pod and user deletes a PVC that's used by it at the same time. 
- if pvc.ObjectMeta.DeletionTimestamp != nil { + // + // 2) Adding a check for kubernetes.io/pvc-protection here to prevent + // the existing running pods from being affected during the rebuild of + // the desired state of the world cache when the kubelet is restarted. + // It is safe for kubelet to add this check here because the PVC will + // be stuck in Terminating state until the pod is deleted. + if pvc.ObjectMeta.DeletionTimestamp != nil && !slices.Contains(pvc.Finalizers, util.PVCProtectionFinalizer) { return nil, errors.New("PVC is being deleted") } diff --git a/test/e2e/storage/csimock/csi_kubelet_restart.go b/test/e2e/storage/csimock/csi_kubelet_restart.go index 9d6ffdb3512..105ac427640 100644 --- a/test/e2e/storage/csimock/csi_kubelet_restart.go +++ b/test/e2e/storage/csimock/csi_kubelet_restart.go @@ -19,27 +19,35 @@ package csimock import ( "context" "fmt" - "os" - "os/exec" - "strings" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framework.WithSerial(), framework.WithDisruptive(), func() { +var _ = utils.SIGDescribe("CSI Mock when kubelet restart", framework.WithSerial(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("csi-mock-when-kubelet-restart") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged m := newMockDriverSetup(f) + ginkgo.BeforeEach(func() { + // These tests requires SSH to nodes, so the provider check should be identical to there + // (the limiting factor is the implementation of util.go's e2essh.GetSigner(...)). + + // Cluster must support node reboot + e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
+ e2eskipper.SkipUnlessSSHKeyPresent() + }) + ginkgo.It("should not umount volume when the pvc is terminating but still used by a running pod", func(ctx context.Context) { + m.init(ctx, testParameters{ registerDriver: true, }) @@ -51,16 +59,16 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo ginkgo.By("Waiting for the Pod to be running") err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) + pod, err = f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get pod %s", pod.Name) ginkgo.By("Deleting the PVC") err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name) ginkgo.By("Restarting kubelet") - err = stopKindKubelet(ctx) - framework.ExpectNoError(err, "failed to stop kubelet") - err = startKindKubelet(ctx) - framework.ExpectNoError(err, "failed to start kubelet") + utils.KubeletCommand(ctx, utils.KRestart, f.ClientSet, pod) + ginkgo.DeferCleanup(utils.KubeletCommand, utils.KStart, f.ClientSet, pod) ginkgo.By("Verifying the PVC is terminating during kubelet restart") pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) @@ -69,7 +77,7 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo ginkgo.By(fmt.Sprintf("Verifying that the driver didn't receive NodeUnpublishVolume call for PVC %s", pvc.Name)) gomega.Consistently(ctx, - func(ctx context.Context) interface{} { + func(ctx context.Context) []drivers.MockCSICall { calls, err := m.driver.GetCalls(ctx) if err != nil { if apierrors.IsUnexpectedServerError(err) { @@ -90,39 +98,3 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name) }) }) - -func stopKindKubelet(ctx context.Context) error { - return kubeletExec("systemctl", "stop", "kubelet") -} - -func startKindKubelet(ctx context.Context) error { - return kubeletExec("systemctl", "start", "kubelet") -} - -// Run a command in container with kubelet (and the whole control plane as containers) -func kubeletExec(command ...string) error { - containerName := getKindContainerName() - args := []string{"exec", containerName} - args = append(args, command...) - cmd := exec.Command("docker", args...) - - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("command %q failed: %v\noutput:%s", prettyCmd(cmd), err, string(out)) - } - - framework.Logf("command %q succeeded:\n%s", prettyCmd(cmd), string(out)) - return nil -} - -func getKindContainerName() string { - clusterName := os.Getenv("KIND_CLUSTER_NAME") - if clusterName == "" { - clusterName = "kind" - } - return clusterName + "-control-plane" -} - -func prettyCmd(cmd *exec.Cmd) string { - return fmt.Sprintf("%s %s", cmd.Path, strings.Join(cmd.Args, " ")) -}
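
For reference, the core behavioral change of PATCH 3/3 can be distilled into a small standalone sketch: when the kubelet rebuilds the desired state of the world after a restart, a terminating PVC should only block a pod's volume if it is no longer protected by the kubernetes.io/pvc-protection finalizer. The snippet below is illustrative only, not code from this series: it assumes the k8s.io/api and k8s.io/apimachinery modules, pvcProtectionFinalizer stands in for util.PVCProtectionFinalizer, and pvcBlocksPodStart is a made-up helper name rather than anything in the kubelet's populator.

package main

import (
	"fmt"
	"slices"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pvcProtectionFinalizer mirrors util.PVCProtectionFinalizer referenced in the patch.
const pvcProtectionFinalizer = "kubernetes.io/pvc-protection"

// pvcBlocksPodStart reports whether the populator should refuse to add the
// volume for this PVC while rebuilding the desired state of the world.
// A terminating PVC blocks the pod only when the pvc-protection finalizer is
// already gone; while the finalizer is present, the claim cannot disappear
// under a running pod, so remounting after a kubelet restart is safe.
func pvcBlocksPodStart(pvc *v1.PersistentVolumeClaim) bool {
	if pvc.DeletionTimestamp == nil {
		return false // not being deleted at all
	}
	return !slices.Contains(pvc.Finalizers, pvcProtectionFinalizer)
}

func main() {
	// DeletionTimestamp is set directly here only for illustration; in a real
	// cluster the API server sets it when the PVC is deleted.
	now := metav1.Now()

	protected := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name:              "data",
		DeletionTimestamp: &now,
		Finalizers:        []string{pvcProtectionFinalizer},
	}}
	unprotected := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name:              "scratch",
		DeletionTimestamp: &now,
	}}

	fmt.Println(pvcBlocksPodStart(protected))   // false: keep the volume mounted
	fmt.Println(pvcBlocksPodStart(unprotected)) // true: reject, the claim is really going away
}

As the populator comment in PATCH 3/3 notes, pvc-protection keeps the PVC stuck in Terminating until the pod is deleted, which is why skipping the rejection during the rebuild is safe; once the pod is gone the finalizer is removed and a later rebuild rejects the claim as before.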