Merge pull request #122082 from carlory/remove-keep-terminated-pod-volumes

Remove the deprecated --keep-terminated-pod-volumes kubelet flag. The change drops the KeepTerminatedPodVolumes flag field and its plumbing through NewMainKubelet, the volume manager, and the desired-state-of-world populators, removes the per-node keepTerminatedPodVolumes property from the attach/detach controller's cache, and deletes the node annotation that carried the setting.

Commit: ef2c682635
@@ -131,9 +131,6 @@ type KubeletFlags struct {
 	// schedulable. Won't have any effect if register-node is false.
 	// DEPRECATED: use registerWithTaints instead
 	RegisterSchedulable bool
-	// This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
-	// This can be useful for debugging volume related issues.
-	KeepTerminatedPodVolumes bool
 	// SeccompDefault enables the use of `RuntimeDefault` as the default seccomp profile for all workloads on the node.
 	SeccompDefault bool
 }
@@ -321,8 +318,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
 	fs.MarkDeprecated("maximum-dead-containers", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.")
 	fs.BoolVar(&f.RegisterSchedulable, "register-schedulable", f.RegisterSchedulable, "Register the node as schedulable. Won't have any effect if register-node is false.")
 	fs.MarkDeprecated("register-schedulable", "will be removed in a future version")
-	fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.")
-	fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version")
 	fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.")
 	fs.MarkDeprecated("experimental-mounter-path", "will be removed in 1.25 or later. in favor of using CSI.")
 	fs.StringVar(&f.CloudConfigFile, "cloud-config", f.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
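The two deleted AddFlags lines follow pflag's register-then-deprecate pattern. A minimal standalone sketch of that pattern (the FlagSet wiring here is illustrative, not the kubelet's real option plumbing): while both calls exist, using the flag merely warns; deleting them, as this PR does, turns any use of `--keep-terminated-pod-volumes` into an unknown-flag error.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	keep := false
	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)
	fs.BoolVar(&keep, "keep-terminated-pod-volumes", keep,
		"Keep terminated pod volumes mounted to the node after the pod terminates.")
	// Register-then-deprecate: the flag still parses but prints a warning on use.
	fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version")

	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("keep-terminated-pod-volumes =", keep)
}
```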
@@ -1313,7 +1313,6 @@ func createAndInitKubelet(kubeServer *options.KubeletServer,
 		kubeServer.MaxPerPodContainerCount,
 		kubeServer.MaxContainerCount,
 		kubeServer.RegisterSchedulable,
-		kubeServer.KeepTerminatedPodVolumes,
 		kubeServer.NodeLabels,
 		kubeServer.NodeStatusMaxImages,
 		kubeServer.KubeletFlags.SeccompDefault || kubeServer.KubeletConfiguration.SeccompDefault)
@@ -887,15 +887,9 @@ func (adc *attachDetachController) GetExec(pluginName string) utilexec.Interface
 
 func (adc *attachDetachController) addNodeToDswp(node *v1.Node, nodeName types.NodeName) {
 	if _, exists := node.Annotations[volumeutil.ControllerManagedAttachAnnotation]; exists {
-		keepTerminatedPodVolumes := false
-
-		if t, ok := node.Annotations[volumeutil.KeepTerminatedPodVolumesAnnotation]; ok {
-			keepTerminatedPodVolumes = t == "true"
-		}
-
 		// Node specifies annotation indicating it should be managed by attach
 		// detach controller. Add it to desired state of world.
-		adc.desiredStateOfWorld.AddNode(nodeName, keepTerminatedPodVolumes)
+		adc.desiredStateOfWorld.AddNode(nodeName)
 	}
 }
 
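For context, the deleted controller logic parsed a per-node annotation into the bool it passed to AddNode. A minimal sketch of that removed lookup, using a plain map in place of node.Annotations; the literal annotation key below is an assumption, since the real code only references it via the volumeutil.KeepTerminatedPodVolumesAnnotation constant.

```go
package main

import "fmt"

// Assumed string value of volumeutil.KeepTerminatedPodVolumesAnnotation.
const keepTerminatedPodVolumesAnnotation = "volumes.kubernetes.io/keep-terminated-pod-volumes"

func main() {
	// Stand-in for node.Annotations on a *v1.Node.
	annotations := map[string]string{
		keepTerminatedPodVolumesAnnotation: "true",
	}

	keepTerminatedPodVolumes := false
	if t, ok := annotations[keepTerminatedPodVolumesAnnotation]; ok {
		keepTerminatedPodVolumes = t == "true"
	}
	// Before this PR the result was threaded into AddNode; now the lookup is gone.
	fmt.Println("keepTerminatedPodVolumes =", keepTerminatedPodVolumes)
}
```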
@@ -25,7 +25,7 @@ import (
 	"fmt"
 	"sync"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
@@ -46,9 +46,7 @@ type DesiredStateOfWorld interface {
 	// AddNode adds the given node to the list of nodes managed by the attach/
 	// detach controller.
 	// If the node already exists this is a no-op.
-	// keepTerminatedPodVolumes is a property of the node that determines
-	// if volumes should be mounted and attached for terminated pods.
-	AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)
+	AddNode(nodeName k8stypes.NodeName)
 
 	// AddPod adds the given pod to the list of pods that reference the
 	// specified volume and is scheduled to the specified node.
@@ -98,10 +96,6 @@ type DesiredStateOfWorld interface {
 	// state of world
 	GetPodToAdd() map[types.UniquePodName]PodToAdd
 
-	// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
-	// mounted and attached for terminated pods
-	GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool
-
 	// Mark multi-attach error as reported to prevent spamming multiple
 	// events for same error
 	SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName)
@@ -158,10 +152,6 @@ type nodeManaged struct {
 	// attached to this node. The key in the map is the name of the volume and
 	// the value is a volumeToAttach object containing more information about the volume.
 	volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
-
-	// keepTerminatedPodVolumes determines if for terminated pods(on this node) - volumes
-	// should be kept mounted and attached.
-	keepTerminatedPodVolumes bool
 }
 
 // The volumeToAttach object represents a volume that should be attached to a node.
@@ -195,15 +185,14 @@ type pod struct {
 	podObj *v1.Pod
 }
 
-func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool) {
+func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
 	dsw.Lock()
 	defer dsw.Unlock()
 
 	if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
 		dsw.nodesManaged[nodeName] = nodeManaged{
 			nodeName:        nodeName,
 			volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
-			keepTerminatedPodVolumes: keepTerminatedPodVolumes,
 		}
 	}
 }
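The AddNode signature change above is the core of the PR; nearly every later hunk is a caller chasing it. A compact, self-contained sketch of the cache's new shape, with stand-in types instead of the real k8s.io imports and the deleted pieces shown as comments:

```go
package main

import (
	"fmt"
	"sync"
)

type NodeName string
type UniqueVolumeName string
type volumeToAttach struct{}

type nodeManaged struct {
	nodeName        NodeName
	volumesToAttach map[UniqueVolumeName]volumeToAttach
	// keepTerminatedPodVolumes bool  // removed: no longer a per-node property
}

type desiredStateOfWorld struct {
	sync.RWMutex
	nodesManaged map[NodeName]nodeManaged
}

// Before: AddNode(nodeName NodeName, keepTerminatedPodVolumes bool)
func (dsw *desiredStateOfWorld) AddNode(nodeName NodeName) {
	dsw.Lock()
	defer dsw.Unlock()
	if _, exists := dsw.nodesManaged[nodeName]; !exists {
		dsw.nodesManaged[nodeName] = nodeManaged{
			nodeName:        nodeName,
			volumesToAttach: make(map[UniqueVolumeName]volumeToAttach),
		}
	}
}

func main() {
	dsw := &desiredStateOfWorld{nodesManaged: map[NodeName]nodeManaged{}}
	dsw.AddNode("node-1")
	dsw.AddNode("node-1") // no-op: the node already exists
	fmt.Println(len(dsw.nodesManaged)) // 1
}
```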
@@ -356,21 +345,6 @@ func (dsw *desiredStateOfWorld) SetMultiAttachError(
 	}
 }
 
-// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
-// mounted and attached for terminated pods
-func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool {
-	dsw.RLock()
-	defer dsw.RUnlock()
-
-	if nodeName == "" {
-		return false
-	}
-	if node, ok := dsw.nodesManaged[nodeName]; ok {
-		return node.keepTerminatedPodVolumes
-	}
-	return false
-}
-
 func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
 	dsw.RLock()
 	defer dsw.RUnlock()
@@ -19,7 +19,7 @@ package cache
 import (
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@@ -35,7 +35,7 @@ func Test_AddNode_Positive_NewNode(t *testing.T) {
 	nodeName := k8stypes.NodeName("node-name")
 
 	// Act
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	// Assert
 	nodeExists := dsw.NodeExists(nodeName)
@@ -60,7 +60,7 @@ func Test_AddNode_Positive_ExistingNode(t *testing.T) {
 	nodeName := k8stypes.NodeName("node-name")
 
 	// Act
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	// Assert
 	nodeExists := dsw.NodeExists(nodeName)
@@ -69,7 +69,7 @@ func Test_AddNode_Positive_ExistingNode(t *testing.T) {
 	}
 
 	// Act
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	// Assert
 	nodeExists = dsw.NodeExists(nodeName)
@@ -94,7 +94,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -142,7 +142,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -215,7 +215,7 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -319,7 +319,7 @@ func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	// Act
 	err := dsw.DeleteNode(nodeName)
@@ -375,7 +375,7 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	podName := "pod-uid"
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -419,7 +419,7 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -467,7 +467,7 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	generatedVolumeName1, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
 	if pod1AddErr != nil {
 		t.Fatalf(
@@ -528,7 +528,7 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	generatedVolumeName, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
 	if pod1AddErr != nil {
 		t.Fatalf(
@@ -576,7 +576,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	node1Name := k8stypes.NodeName("node1-name")
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
 	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, node1Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -631,7 +631,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -705,7 +705,7 @@ func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	// Act
 	nodeExists := dsw.NodeExists(nodeName)
@@ -729,7 +729,7 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	podName := "pod-uid"
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -759,7 +759,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	podName := "pod-uid"
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
@@ -836,8 +836,8 @@ func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	node1Name := k8stypes.NodeName("node1-name")
 	node2Name := k8stypes.NodeName("node2-name")
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(node2Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
+	dsw.AddNode(node2Name)
 
 	// Act
 	volumesToAttach := dsw.GetVolumesToAttach()
@@ -859,7 +859,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
 	pod1Name := "pod1-uid"
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
 	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -871,7 +871,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
 	pod2Name := "pod2-uid"
 	volume2Name := v1.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
-	dsw.AddNode(node2Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node2Name)
 	generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -904,7 +904,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
 	pod1Name := "pod1-uid"
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
 	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -916,7 +916,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
 	pod2Name := "pod2-uid"
 	volume2Name := v1.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
-	dsw.AddNode(node2Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node2Name)
 	generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -958,7 +958,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
 	pod1Name := "pod1-uid"
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
 	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -970,7 +970,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
 	pod2aName := "pod2a-name"
 	volume2Name := v1.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
-	dsw.AddNode(node2Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node2Name)
 	generatedVolume2Name1, podAddErr := dsw.AddPod(types.UniquePodName(pod2aName), controllervolumetesting.NewPod(pod2aName, pod2aName), volume2Spec, node2Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -1042,7 +1042,7 @@ func Test_GetPodsOnNodes(t *testing.T) {
 	pod1Name := "pod1-uid"
 	volume1Name := v1.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
-	dsw.AddNode(node1Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node1Name)
 	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -1054,7 +1054,7 @@ func Test_GetPodsOnNodes(t *testing.T) {
 	pod2Name := "pod2-uid"
 	volume2Name := v1.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
-	dsw.AddNode(node2Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node2Name)
 	_, podAddErr = dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
 	if podAddErr != nil {
 		t.Fatalf(
@@ -1065,7 +1065,7 @@ func Test_GetPodsOnNodes(t *testing.T) {
 
 	// Third node without any pod
 	node3Name := k8stypes.NodeName("node3-name")
-	dsw.AddNode(node3Name, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(node3Name)
 
 	// Act
 	pods := dsw.GetVolumePodsOnNodes([]k8stypes.NodeName{node1Name, node2Name, node3Name, "non-existing-node"}, generatedVolume1Name)
@@ -147,7 +147,7 @@ func TestTotalVolumesMetricCollection(t *testing.T) {
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
 
-	dsw.AddNode(nodeName, false)
+	dsw.AddNode(nodeName)
 	_, err := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
@@ -91,7 +91,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
 	}
 
 	//add the given node to the list of nodes managed by dsw
-	dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
+	dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName))
 	logger, _ := ktesting.NewTestContext(t)
 	dswp.findAndAddActivePods(logger)
 
@@ -196,7 +196,7 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
 	}
 
 	//add the given node to the list of nodes managed by dsw
-	dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
+	dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName))
 	logger, _ := ktesting.NewTestContext(t)
 	dswp.findAndAddActivePods(logger)
 
@@ -116,7 +116,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -170,7 +170,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -248,7 +248,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
@@ -327,7 +327,7 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
 	volumeName := v1.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := k8stypes.NodeName("node-name")
-	dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName)
 	volumeExists := dsw.VolumeExists(volumeName, nodeName)
 	if volumeExists {
 		t.Fatalf(
@@ -408,8 +408,8 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}
 	nodeName1 := k8stypes.NodeName("node-name1")
 	nodeName2 := k8stypes.NodeName(volumetesting.MultiAttachNode)
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
+	dsw.AddNode(nodeName2)
 
 	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
 	if podAddErr != nil {
@@ -503,8 +503,8 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing.
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	nodeName1 := k8stypes.NodeName("node-name1")
 	nodeName2 := k8stypes.NodeName("node-name2")
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
+	dsw.AddNode(nodeName2)
 
 	// Add both pods at the same time to provoke a potential race condition in the reconciler
 	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@@ -596,8 +596,8 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	nodeName1 := k8stypes.NodeName(volumetesting.UncertainAttachNode)
 	nodeName2 := k8stypes.NodeName("node-name2")
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
+	dsw.AddNode(nodeName2)
 
 	// Act
 	logger, ctx := ktesting.NewTestContext(t)
@@ -660,7 +660,7 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	nodeName1 := k8stypes.NodeName("node-name1")
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
 
 	// Add the pod in which the volume is attached to the FailDetachNode
 	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@@ -722,8 +722,8 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	nodeName1 := k8stypes.NodeName(volumetesting.FailDetachNode)
 	nodeName2 := k8stypes.NodeName("node-name2")
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
+	dsw.AddNode(nodeName2)
 
 	// Act
 	logger, ctx := ktesting.NewTestContext(t)
@@ -802,8 +802,8 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T
 	volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	nodeName1 := k8stypes.NodeName(volumetesting.TimeoutAttachNode)
 	nodeName2 := k8stypes.NodeName("node-name2")
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
-	dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
+	dsw.AddNode(nodeName2)
 
 	// Act
 	logger, ctx := ktesting.NewTestContext(t)
@@ -881,7 +881,7 @@ func Test_Run_OneVolumeDetachOnOutOfServiceTaintedNode(t *testing.T) {
 		},
 	}
 	informerFactory.Core().V1().Nodes().Informer().GetStore().Add(node1)
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
 	volumeExists := dsw.VolumeExists(volumeName1, nodeName1)
 	if volumeExists {
 		t.Fatalf(
@@ -962,7 +962,7 @@ func Test_Run_OneVolumeDetachOnNoOutOfServiceTaintedNode(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{Name: string(nodeName1)},
 	}
 	informerFactory.Core().V1().Nodes().Informer().GetStore().Add(node1)
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
 	volumeExists := dsw.VolumeExists(volumeName1, nodeName1)
 	if volumeExists {
 		t.Fatalf(
@@ -1047,7 +1047,7 @@ func Test_Run_OneVolumeDetachOnUnhealthyNode(t *testing.T) {
 		},
 	}
 	informerFactory.Core().V1().Nodes().Informer().GetStore().Add(node1)
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
 	volumeExists := dsw.VolumeExists(volumeName1, nodeName1)
 	if volumeExists {
 		t.Fatalf(
@@ -1162,7 +1162,7 @@ func Test_Run_OneVolumeDetachOnUnhealthyNodeWithForceDetachOnUnmountDisabled(t *
 	if addErr != nil {
 		t.Fatalf("Add node failed. Expected: <no error> Actual: <%v>", addErr)
 	}
-	dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
+	dsw.AddNode(nodeName1)
 	volumeExists := dsw.VolumeExists(volumeName1, nodeName1)
 	if volumeExists {
 		t.Fatalf(
@@ -1307,7 +1307,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 
 	nodes := []k8stypes.NodeName{}
 	for _, n := range test.nodes {
-		dsw.AddNode(n.name, false /*keepTerminatedPodVolumes*/)
+		dsw.AddNode(n.name)
 		nodes = append(nodes, n.name)
 		for _, podName := range n.podNames {
 			volumeName := v1.UniqueVolumeName("volume-name")
@@ -174,11 +174,7 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf
 	}
 
 	if util.IsPodTerminated(pod, pod.Status) {
-		nodeName := types.NodeName(pod.Spec.NodeName)
-		keepTerminatedPodVolume := desiredStateOfWorld.GetKeepTerminatedPodVolumesForNode(nodeName)
-		// if pod is terminate we let kubelet policy dictate if volume
-		// should be detached or not
-		return keepTerminatedPodVolume
+		return false
 	}
 	return defaultAction
 }
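The net behavioral change in this hunk: for a terminated pod, DetermineVolumeAction no longer consults a per-node keep-volumes policy and always answers false, meaning the volume is eligible for detach. A runnable sketch with the pod type and the terminated check stubbed out (names here are illustrative, not the real package's types):

```go
package main

import "fmt"

type pod struct{ terminated bool }

func determineVolumeAction(p pod, defaultAction bool) bool {
	if p.terminated {
		// Before: return desiredStateOfWorld.GetKeepTerminatedPodVolumesForNode(nodeName)
		return false
	}
	return defaultAction
}

func main() {
	fmt.Println(determineVolumeAction(pod{terminated: true}, true))  // false: always detach
	fmt.Println(determineVolumeAction(pod{terminated: false}, true)) // true: default applies
}
```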
@@ -360,7 +360,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	maxPerPodContainerCount int32,
 	maxContainerCount int32,
 	registerSchedulable bool,
-	keepTerminatedPodVolumes bool,
 	nodeLabels map[string]string,
 	nodeStatusMaxImages int32,
 	seccompDefault bool,
@@ -561,7 +560,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		clock:                        clock.RealClock{},
 		enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
 		makeIPTablesUtilChains:       kubeCfg.MakeIPTablesUtilChains,
-		keepTerminatedPodVolumes:     keepTerminatedPodVolumes,
 		nodeStatusMaxImages:          nodeStatusMaxImages,
 		tracer:                       tracer,
 		nodeStartupLatencyTracker:    kubeDeps.NodeStartupLatencyTracker,
@@ -849,7 +847,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		kubeDeps.HostUtil,
 		klet.getPodsDir(),
 		kubeDeps.Recorder,
-		keepTerminatedPodVolumes,
 		volumepathhandler.NewBlockVolumePathHandler())
 
 	klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
@@ -1304,10 +1301,6 @@ type Kubelet struct {
 	// StatsProvider provides the node and the container stats.
 	StatsProvider *stats.Provider
 
-	// This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
-	// This can be useful for debugging volume related issues.
-	keepTerminatedPodVolumes bool // DEPRECATED
-
 	// pluginmanager runs a set of asynchronous loops that figure out which
 	// plugins need to be registered/unregistered based on this node and makes it so.
 	pluginManager pluginmanager.PluginManager
@@ -2167,20 +2160,18 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 	}
 	klog.V(4).InfoS("Pod termination unmounted volumes", "pod", klog.KObj(pod), "podUID", pod.UID)
 
-	if !kl.keepTerminatedPodVolumes {
-		// This waiting loop relies on the background cleanup which starts after pod workers respond
-		// true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
-		if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
-			volumesExist := kl.podVolumesExist(pod.UID)
-			if volumesExist {
-				klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
-			}
-			return !volumesExist, nil
-		}); err != nil {
-			return err
-		}
-		klog.V(3).InfoS("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)
+	// This waiting loop relies on the background cleanup which starts after pod workers respond
+	// true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
+	if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
+		volumesExist := kl.podVolumesExist(pod.UID)
+		if volumesExist {
+			klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
+		}
+		return !volumesExist, nil
+	}); err != nil {
+		return err
 	}
+	klog.V(3).InfoS("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)
 
 	// After volume unmount is complete, let the secret and configmap managers know we're done with this pod
 	if kl.secretManager != nil {
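With the keepTerminatedPodVolumes guard gone, SyncTerminatedPod always runs the unmount wait loop. The polling primitive it uses is wait.PollUntilContextCancel from k8s.io/apimachinery; a self-contained sketch of the same pattern, with a timer standing in for kl.podVolumesExist:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Stand-in for kl.podVolumesExist(pod.UID): pretend volumes vanish after ~300ms.
	cleanedUpAt := time.Now().Add(300 * time.Millisecond)

	err := wait.PollUntilContextCancel(context.Background(), 100*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			volumesExist := time.Now().Before(cleanedUpAt)
			if volumesExist {
				fmt.Println("pod is terminated, but some volumes have not been cleaned up")
			}
			return !volumesExist, nil // done when no volumes remain
		})
	fmt.Println("wait loop finished, err =", err)
}
```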
@@ -359,14 +359,6 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
 		klog.V(2).InfoS("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
 	}
 
-	if kl.keepTerminatedPodVolumes {
-		if node.Annotations == nil {
-			node.Annotations = make(map[string]string)
-		}
-		klog.V(2).InfoS("Setting node annotation to keep pod volumes of terminated pods attached to the node")
-		node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
-	}
-
 	// @question: should this be place after the call to the cloud provider? which also applies labels
 	for k, v := range kl.nodeLabels {
 		if cv, found := node.ObjectMeta.Labels[k]; found {
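This hunk deletes the write side of the annotation whose read side disappeared from the attach/detach controller earlier: the kubelet stamped its own Node object at registration so the controller could recover the flag's value. Reduced to plain maps, again assuming the annotation key's string value:

```go
package main

import "fmt"

// Assumed value of volutil.KeepTerminatedPodVolumesAnnotation.
const keepTerminatedPodVolumesAnnotation = "volumes.kubernetes.io/keep-terminated-pod-volumes"

func main() {
	var annotations map[string]string // stand-in for node.Annotations
	keepTerminatedPodVolumes := true  // formerly kl.keepTerminatedPodVolumes

	if keepTerminatedPodVolumes {
		if annotations == nil {
			annotations = make(map[string]string)
		}
		annotations[keepTerminatedPodVolumesAnnotation] = "true"
	}
	fmt.Println(annotations) // map[volumes.kubernetes.io/keep-terminated-pod-volumes:true]
}
```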
@@ -2427,9 +2427,8 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupP
 	// If volumes have not been unmounted/detached, do not delete the cgroup
 	// so any memory backed volumes don't have their charges propagated to the
 	// parent croup. If the volumes still exist, reduce the cpu shares for any
-	// process in the cgroup to the minimum value while we wait. if the kubelet
-	// is configured to keep terminated volumes, we will delete the cgroup and not block.
-	if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes {
+	// process in the cgroup to the minimum value while we wait.
+	if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
 		klog.V(3).InfoS("Orphaned pod found, but volumes not yet removed. Reducing cpu to minimum", "podUID", uid)
 		if err := pcm.ReduceCPULimits(val); err != nil {
 			klog.InfoS("Failed to reduce cpu time for pod pending volume cleanup", "podUID", uid, "err", err)
@@ -392,7 +392,6 @@ func newTestKubeletWithImageList(
 		kubelet.hostutil,
 		kubelet.getPodsDir(),
 		kubelet.recorder,
-		false, /* keepTerminatedPodVolumes */
 		volumetest.NewBlockVolumePathHandler())
 
 	kubelet.pluginManager = pluginmanager.NewPluginManager(
@@ -3119,7 +3118,6 @@ func TestNewMainKubeletStandAlone(t *testing.T) {
 		1024,
 		110,
 		true,
-		true,
 		map[string]string{},
 		1024,
 		false,
@@ -122,7 +122,6 @@ func TestRunOnce(t *testing.T) {
 		kb.hostutil,
 		kb.getPodsDir(),
 		kb.recorder,
-		false, /* keepTerminatedPodVolumes */
 		volumetest.NewBlockVolumePathHandler())
 
 	// TODO: Factor out "stats.Provider" from Kubelet so we don't have a cyclic dependency
@ -99,7 +99,6 @@ func NewDesiredStateOfWorldPopulator(
|
|||||||
desiredStateOfWorld cache.DesiredStateOfWorld,
|
desiredStateOfWorld cache.DesiredStateOfWorld,
|
||||||
actualStateOfWorld cache.ActualStateOfWorld,
|
actualStateOfWorld cache.ActualStateOfWorld,
|
||||||
kubeContainerRuntime kubecontainer.Runtime,
|
kubeContainerRuntime kubecontainer.Runtime,
|
||||||
keepTerminatedPodVolumes bool,
|
|
||||||
csiMigratedPluginManager csimigration.PluginManager,
|
csiMigratedPluginManager csimigration.PluginManager,
|
||||||
intreeToCSITranslator csimigration.InTreeToCSITranslator,
|
intreeToCSITranslator csimigration.InTreeToCSITranslator,
|
||||||
volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorldPopulator {
|
volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorldPopulator {
|
||||||
@ -113,7 +112,6 @@ func NewDesiredStateOfWorldPopulator(
|
|||||||
pods: processedPods{
|
pods: processedPods{
|
||||||
processedPods: make(map[volumetypes.UniquePodName]bool)},
|
processedPods: make(map[volumetypes.UniquePodName]bool)},
|
||||||
kubeContainerRuntime: kubeContainerRuntime,
|
kubeContainerRuntime: kubeContainerRuntime,
|
||||||
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
|
|
||||||
hasAddedPods: false,
|
hasAddedPods: false,
|
||||||
hasAddedPodsLock: sync.RWMutex{},
|
hasAddedPodsLock: sync.RWMutex{},
|
||||||
csiMigratedPluginManager: csiMigratedPluginManager,
|
csiMigratedPluginManager: csiMigratedPluginManager,
|
||||||
@ -131,7 +129,6 @@ type desiredStateOfWorldPopulator struct {
|
|||||||
actualStateOfWorld cache.ActualStateOfWorld
|
actualStateOfWorld cache.ActualStateOfWorld
|
||||||
pods processedPods
|
pods processedPods
|
||||||
kubeContainerRuntime kubecontainer.Runtime
|
kubeContainerRuntime kubecontainer.Runtime
|
||||||
keepTerminatedPodVolumes bool
|
|
||||||
hasAddedPods bool
|
hasAddedPods bool
|
||||||
hasAddedPodsLock sync.RWMutex
|
hasAddedPodsLock sync.RWMutex
|
||||||
csiMigratedPluginManager csimigration.PluginManager
|
csiMigratedPluginManager csimigration.PluginManager
|
||||||
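
Taken together, the three populator hunks above are pure plumbing: the constructor parameter, the struct-literal entry, and the struct field all disappear, so every caller drops one argument. A schematic of that arity change, with types reduced to stand-ins (nothing here is the real populator API):

package main

import "fmt"

// Stand-in type; the real populator carries the kubelet's caches and runtime.
type desiredStateOfWorldPopulator struct {
	hasAddedPods bool
}

// Before: newPopulator(..., keepTerminatedPodVolumes bool, ...)
// After: the flag parameter and the field it populated are simply gone.
func newPopulator() *desiredStateOfWorldPopulator {
	return &desiredStateOfWorldPopulator{hasAddedPods: false}
}

func main() {
	fmt.Printf("%+v\n", *newPopulator())
}
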
@ -234,9 +231,6 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
|
|||||||
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) {
|
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if dswp.keepTerminatedPodVolumes {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Once a pod has been deleted from kubelet pod manager, do not delete
|
// Once a pod has been deleted from kubelet pod manager, do not delete
|
||||||
|
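
The hunk above is the behavioral core on the volume-manager side: once the pod state provider says a terminated pod's runtime can be removed, nothing can veto volume cleanup anymore. A toy model of the removed short-circuit (gate names are illustrative):

package main

import "fmt"

// skipVolumeRemoval models the loop above: the only remaining gate is whether
// the pod's runtime may be removed; the keepTerminatedPodVolumes veto is gone.
func skipVolumeRemoval(runtimeRemovable, keepTerminatedPodVolumes bool) bool {
	if !runtimeRemovable {
		return true // still running or not yet cleaned up: keep volumes
	}
	// Removed by this commit:
	// if keepTerminatedPodVolumes { return true }
	_ = keepTerminatedPodVolumes
	return false // volumes are always removed once the runtime is gone
}

func main() {
	// Before this change the first call could return true on an opted-in node.
	fmt.Println(skipVolumeRemoval(true, true))  // false: volumes get unmounted
	fmt.Println(skipVolumeRemoval(false, true)) // true: pod still needs its volumes
}
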
@ -1596,7 +1596,6 @@ func createDswpWithVolumeWithCustomPluginMgr(t *testing.T, pv *v1.PersistentVolu
|
|||||||
pods: processedPods{
|
pods: processedPods{
|
||||||
processedPods: make(map[types.UniquePodName]bool)},
|
processedPods: make(map[types.UniquePodName]bool)},
|
||||||
kubeContainerRuntime: fakeRuntime,
|
kubeContainerRuntime: fakeRuntime,
|
||||||
keepTerminatedPodVolumes: false,
|
|
||||||
csiMigratedPluginManager: csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate),
|
csiMigratedPluginManager: csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate),
|
||||||
intreeToCSITranslator: csiTranslator,
|
intreeToCSITranslator: csiTranslator,
|
||||||
volumePluginMgr: fakeVolumePluginMgr,
|
volumePluginMgr: fakeVolumePluginMgr,
|
||||||
|
@ -182,7 +182,6 @@ func NewVolumeManager(
|
|||||||
hostutil hostutil.HostUtils,
|
hostutil hostutil.HostUtils,
|
||||||
kubeletPodsDir string,
|
kubeletPodsDir string,
|
||||||
recorder record.EventRecorder,
|
recorder record.EventRecorder,
|
||||||
keepTerminatedPodVolumes bool,
|
|
||||||
blockVolumePathHandler volumepathhandler.BlockVolumePathHandler) VolumeManager {
|
blockVolumePathHandler volumepathhandler.BlockVolumePathHandler) VolumeManager {
|
||||||
|
|
||||||
seLinuxTranslator := util.NewSELinuxLabelTranslator()
|
seLinuxTranslator := util.NewSELinuxLabelTranslator()
|
||||||
@ -211,7 +210,6 @@ func NewVolumeManager(
|
|||||||
vm.desiredStateOfWorld,
|
vm.desiredStateOfWorld,
|
||||||
vm.actualStateOfWorld,
|
vm.actualStateOfWorld,
|
||||||
kubeContainerRuntime,
|
kubeContainerRuntime,
|
||||||
keepTerminatedPodVolumes,
|
|
||||||
csiMigratedPluginManager,
|
csiMigratedPluginManager,
|
||||||
intreeToCSITranslator,
|
intreeToCSITranslator,
|
||||||
volumePluginMgr)
|
volumePluginMgr)
|
||||||
|
@ -415,7 +415,6 @@ func newTestVolumeManager(t *testing.T, tmpDir string, podManager kubepod.Manage
|
|||||||
hostutil.NewFakeHostUtil(nil),
|
hostutil.NewFakeHostUtil(nil),
|
||||||
"",
|
"",
|
||||||
fakeRecorder,
|
fakeRecorder,
|
||||||
false, /* keepTerminatedPodVolumes */
|
|
||||||
fakePathHandler)
|
fakePathHandler)
|
||||||
|
|
||||||
return vm
|
return vm
|
||||||
|
@ -59,10 +59,6 @@ const (
|
|||||||
// managed by the attach/detach controller
|
// managed by the attach/detach controller
|
||||||
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
|
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
|
||||||
|
|
||||||
// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
|
|
||||||
// that decides if pod volumes are unmounted when pod is terminated
|
|
||||||
KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
|
|
||||||
|
|
||||||
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
|
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
|
||||||
// name to create the place for volume mounts in the global PD path.
|
// name to create the place for volume mounts in the global PD path.
|
||||||
MountsInGlobalPDPath = "mounts"
|
MountsInGlobalPDPath = "mounts"
|
||||||
|
@ -286,78 +286,6 @@ func TestPodUpdateWithWithADC(t *testing.T) {
|
|||||||
waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 20*time.Second, "expected 0 pods in dsw after pod completion", 0)
|
waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 20*time.Second, "expected 0 pods in dsw after pod completion", 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
|
|
||||||
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
|
|
||||||
server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
|
|
||||||
defer server.TearDownFn()
|
|
||||||
namespaceName := "test-pod-update"
|
|
||||||
|
|
||||||
node := &v1.Node{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: "node-sandbox",
|
|
||||||
Annotations: map[string]string{
|
|
||||||
util.ControllerManagedAttachAnnotation: "true",
|
|
||||||
util.KeepTerminatedPodVolumesAnnotation: "true",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
tCtx := ktesting.Init(t)
|
|
||||||
defer tCtx.Cancel("test has completed")
|
|
||||||
testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig)
|
|
||||||
|
|
||||||
ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t)
|
|
||||||
defer framework.DeleteNamespaceOrDie(testClient, ns, t)
|
|
||||||
|
|
||||||
pod := fakePodWithVol(namespaceName)
|
|
||||||
podStopCh := make(chan struct{})
|
|
||||||
defer close(podStopCh)
|
|
||||||
|
|
||||||
if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
|
|
||||||
t.Fatalf("Failed to created node : %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go informers.Core().V1().Nodes().Informer().Run(podStopCh)
|
|
||||||
|
|
||||||
if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
|
|
||||||
t.Errorf("Failed to create pod : %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
podInformer := informers.Core().V1().Pods().Informer()
|
|
||||||
go podInformer.Run(podStopCh)
|
|
||||||
|
|
||||||
// start controller loop
|
|
||||||
go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done())
|
|
||||||
go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done())
|
|
||||||
go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done())
|
|
||||||
initCSIObjects(tCtx.Done(), informers)
|
|
||||||
go ctrl.Run(tCtx)
|
|
||||||
// Run pvCtrl to avoid leaking goroutines started during its creation.
|
|
||||||
go pvCtrl.Run(tCtx)
|
|
||||||
|
|
||||||
waitToObservePods(t, podInformer, 1)
|
|
||||||
podKey, err := cache.MetaNamespaceKeyFunc(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("MetaNamespaceKeyFunc failed with : %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err = podInformer.GetStore().GetByKey(podKey)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Pod not found in Pod Informer cache : %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
waitForPodsInDSWP(t, ctrl.GetDesiredStateOfWorld())
|
|
||||||
|
|
||||||
pod.Status.Phase = v1.PodSucceeded
|
|
||||||
|
|
||||||
if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
|
|
||||||
t.Errorf("Failed to update pod : %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 20*time.Second, "expected non-zero pods in dsw if KeepTerminatedPodVolumesAnnotation is set", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for the podInformer to observe the pods. Call this function before
|
// wait for the podInformer to observe the pods. Call this function before
|
||||||
// running the RC manager to prevent the rc manager from creating new pods
|
// running the RC manager to prevent the rc manager from creating new pods
|
||||||
// rather than adopting the existing ones.
|
// rather than adopting the existing ones.
|
||||||
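
With the annotation gone there is no state in which a completed pod should linger in the attach/detach controller's desired state of world, which is why the whole test is deleted rather than updated; the surviving TestPodUpdateWithWithADC asserts the zero-pod outcome. A compact model of the expectation that remains (function name is illustrative; the real check is waitForPodFuncInDSWP in this file):

package main

import "fmt"

// expectedPodsInDSW models the assertion kept in TestPodUpdateWithWithADC:
// after a pod reaches a terminal phase, the desired state of world drains to
// zero. No node annotation can hold its volumes desired anymore.
func expectedPodsInDSW(podTerminal bool) int {
	if podTerminal {
		return 0 // was 1 when KeepTerminatedPodVolumesAnnotation was honored
	}
	return 1
}

func main() {
	fmt.Println(expectedPodsInDSW(true)) // 0
}
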