use node informer to check volumes attachment status before backoff
fix unit tests
@@ -1655,6 +1655,19 @@ func GetTestKubeletVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumeP
 	return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
 }
 
+func GetTestKubeletVolumePluginMgrWithNode(t *testing.T, node *v1.Node) (*VolumePluginMgr, *FakeVolumePlugin) {
+	plugins := ProbeVolumePlugins(VolumeConfig{})
+	v := NewFakeKubeletVolumeHost(
+		t,
+		"",      /* rootDir */
+		nil,     /* kubeClient */
+		plugins, /* plugins */
+	)
+	v.WithNode(node)
+
+	return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
+}
+
 // CreateTestPVC returns a provisionable PVC for tests
 func CreateTestPVC(capacity string, accessModes []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
 	claim := v1.PersistentVolumeClaim{
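For context, a minimal sketch of how a test might drive the new helper. The node name, volume name, and device path are illustrative placeholders (not from the commit), and the usual v1/metav1 imports are assumed:

func TestVolumePluginMgrWithNode(t *testing.T) {
	// Hypothetical node whose status already reports one attached volume.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "fake-node"},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{Name: "fake-plugin/fake-device", DevicePath: "/dev/sdb"},
			},
		},
	}
	pluginMgr, fakePlugin := GetTestKubeletVolumePluginMgrWithNode(t, node)
	_, _ = pluginMgr, fakePlugin // assert against these as the test requires
}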
@@ -64,6 +64,7 @@ type fakeVolumeHost struct {
 	nodeLabels             map[string]string
 	nodeName               string
 	subpather              subpath.Interface
+	node                   *v1.Node
 	csiDriverLister        storagelistersv1.CSIDriverLister
 	volumeAttachmentLister storagelistersv1.VolumeAttachmentLister
 	informerFactory        informers.SharedInformerFactory
@@ -153,6 +154,10 @@ func (f *fakeVolumeHost) GetPluginMgr() *VolumePluginMgr {
 	return f.pluginMgr
 }
 
+func (f *fakeVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
+	return map[v1.UniqueVolumeName]string{}, nil
+}
+
 func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
 	// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
 	wrapperVolumeName := "wrapped_" + volName
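The no-op stub above presumably exists to keep fakeVolumeHost conforming to a method newly added to the VolumeHost interface. Inferred from the fake's signature (the interface change itself is not part of this excerpt), the addition would look roughly like:

	// GetAttachedVolumesFromNodeStatus returns the volumes the node's status
	// reports as attached, keyed by unique volume name, with the device path
	// as the value.
	GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error)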
@@ -305,25 +310,25 @@ type fakeKubeletVolumeHost struct {
 var _ KubeletVolumeHost = &fakeKubeletVolumeHost{}
 var _ FakeVolumeHost = &fakeKubeletVolumeHost{}
 
-func NewFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) FakeVolumeHost {
+func NewFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeKubeletVolumeHost {
 	return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
 }
 
-func NewFakeKubeletVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) FakeVolumeHost {
+func NewFakeKubeletVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) *fakeKubeletVolumeHost {
 	return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, cloud, nil, "", nil, nil)
 }
 
-func NewFakeKubeletVolumeHostWithNodeLabels(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, labels map[string]string) FakeVolumeHost {
+func NewFakeKubeletVolumeHostWithNodeLabels(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, labels map[string]string) *fakeKubeletVolumeHost {
 	volHost := newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
 	volHost.nodeLabels = labels
 	return volHost
 }
 
-func NewFakeKubeletVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) FakeVolumeHost {
+func NewFakeKubeletVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeKubeletVolumeHost {
 	return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, nodeName, driverLister, volumeAttachLister)
 }
 
-func NewFakeKubeletVolumeHostWithMounterFSType(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType) FakeVolumeHost {
+func NewFakeKubeletVolumeHostWithMounterFSType(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType) *fakeKubeletVolumeHost {
 	return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, pathToTypeMap, "", nil, nil)
 }
@@ -351,6 +356,11 @@ func newFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset
 	return host
 }
 
+func (f *fakeKubeletVolumeHost) WithNode(node *v1.Node) *fakeKubeletVolumeHost {
+	f.node = node
+	return f
+}
+
 func (f *fakeKubeletVolumeHost) SetKubeletError(err error) {
 	f.mux.Lock()
 	defer f.mux.Unlock()
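Note that the constructors in the previous hunk now return the concrete *fakeKubeletVolumeHost rather than the FakeVolumeHost interface; that is what makes the builder-style WithNode above chainable. An illustrative call site (plugins and node are placeholders for whatever the test sets up):

	host := NewFakeKubeletVolumeHost(t, "" /* rootDir */, nil /* kubeClient */, plugins).WithNode(node)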
@@ -362,6 +372,17 @@ func (f *fakeKubeletVolumeHost) GetInformerFactory() informers.SharedInformerFac
 	return f.informerFactory
 }
 
+func (f *fakeKubeletVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
+	result := map[v1.UniqueVolumeName]string{}
+	if f.node != nil {
+		for _, av := range f.node.Status.VolumesAttached {
+			result[av.Name] = av.DevicePath
+		}
+	}
+
+	return result, nil
+}
+
 func (f *fakeKubeletVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
 	return f.csiDriverLister
 }
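Per the commit message, the fake above mirrors what the production kubelet volume host is expected to do: read Node.Status.VolumesAttached through the node informer's lister rather than querying the API server before each backoff check. A rough sketch of that counterpart, assuming nodeLister and nodeName fields that are not shown in this diff:

func (kvh *kubeletVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
	// Served from the informer's local cache, so no API-server round trip.
	node, err := kvh.nodeLister.Get(kvh.nodeName)
	if err != nil {
		return nil, fmt.Errorf("error retrieving node %s: %v", kvh.nodeName, err)
	}
	attached := map[v1.UniqueVolumeName]string{}
	for _, av := range node.Status.VolumesAttached {
		attached[av.Name] = av.DevicePath
	}
	return attached, nil
}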