Mirror of https://github.com/k3s-io/kubernetes.git — synced 2025-07-27 05:27:21 +00:00
Merge pull request #67955 from jsafrane/csi-skip-attach-saad

Automatic merge from submit-queue (batch tested with PRs 68161, 68023, 67909, 67955, 67731). To cherry-pick this change to another branch, follow the instructions at https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

CSI: skip attach for non-attachable drivers

**What this PR does / why we need it**: This implements https://github.com/kubernetes/community/pull/2523: CSI volumes that don't need attach/detach no longer require an external attacher to be running.

WIP:

* contains #67803 to get the CSIDriver API. Ignore the first commit.
* ~~missing e2e test~~

/sig storage

cc: @saad-ali @vladimirvivien @verult @msau42 @gnufied @davidz627

**Release note**:

```release-note
CSI volume plugin does not need external attacher for non-attachable CSI volumes.
```
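For orientation before the diff: a CSI driver opts out of the attach/detach cycle by publishing a CSIDriver object (the alpha CRD from the csi-api repo that #67803 brings in) with AttachRequired set to false. A minimal sketch in Go, using the same csi-api types the diff below consumes; the driver name is a placeholder:

```go
package csiexample

import (
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
)

// newNonAttachableDriver builds a CSIDriver object declaring that the named
// driver never needs ControllerPublish/Unpublish. With the CSISkipAttach gate
// enabled, Kubernetes then creates no VolumeAttachment objects for its volumes
// and no external attacher has to run.
func newNonAttachableDriver(name string) *csiapi.CSIDriver {
	attachRequired := false // pointer field: nil means "unset" and keeps the old attach behavior
	return &csiapi.CSIDriver{
		ObjectMeta: meta.ObjectMeta{Name: name},
		Spec: csiapi.CSIDriverSpec{
			AttachRequired: &attachRequired,
		},
	}
}
```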
This commit is contained in: 19c2538798
pkg/features/kube_features.go:

```diff
@@ -386,6 +386,10 @@ const (
 	//
 	// Allow TTL controller to clean up Pods and Jobs after they finish.
 	TTLAfterFinished utilfeature.Feature = "TTLAfterFinished"
+
+	// owner: @jsafrane
+	// Kubernetes skips attaching CSI volumes that don't require attachment.
+	//
+	CSISkipAttach utilfeature.Feature = "CSISkipAttach"
 )
 
 func init() {
@@ -451,6 +455,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	VolumeSnapshotDataSource: {Default: false, PreRelease: utilfeature.Alpha},
 	ProcMountType:            {Default: false, PreRelease: utilfeature.Alpha},
 	TTLAfterFinished:         {Default: false, PreRelease: utilfeature.Alpha},
+	CSISkipAttach:            {Default: false, PreRelease: utilfeature.Alpha},
 
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
```
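The gate defaults to off. The unit tests below flip it in-process; a cluster operator would pass --feature-gates=CSISkipAttach=true to the components instead. A sketch of the in-process form, mirroring the save/restore pattern the new tests use:

```go
package csiexample

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// enableSkipAttach turns the alpha gate on for one test and returns a restore
// function, following the DeepCopy/assign pattern in the tests added below.
func enableSkipAttach(t *testing.T) (restore func()) {
	saved := utilfeature.DefaultFeatureGate.DeepCopy()
	if err := utilfeature.DefaultFeatureGate.Set("CSISkipAttach=true"); err != nil {
		t.Fatalf("Failed to set CSISkipAttach=true: %s", err)
	}
	return func() { utilfeature.DefaultFeatureGate = saved }
}
```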
pkg/volume/csi/BUILD:

```diff
@@ -29,6 +29,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library",
         "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library",
         "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
@@ -59,10 +60,14 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/client-go/util/testing:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
         "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
     ],
 )
```
|
@ -70,6 +70,16 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
skip, err := c.plugin.skipAttach(csiSource.Driver)
|
||||||
|
if err != nil {
|
||||||
|
glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
glog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver))
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
node := string(nodeName)
|
node := string(nodeName)
|
||||||
pvName := spec.PersistentVolume.GetName()
|
pvName := spec.PersistentVolume.GetName()
|
||||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
|
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
|
||||||
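Worth noting for reviewers: an empty attach ID with a nil error is now a meaningful result. A caller-side sketch of the contract (illustrative only, not code from this PR):

```go
package csiexample

import "fmt"

// handleAttachResult sketches how a caller interprets Attach's return values
// after this change (the printing is illustrative).
func handleAttachResult(attachID string, err error) {
	switch {
	case err != nil:
		fmt.Println("attach failed:", err) // a real failure, surfaced as before
	case attachID == "":
		// Non-attachable driver: no VolumeAttachment was created, and the
		// matching short-circuit in WaitForAttach (next hunk) returns "" too.
		fmt.Println("attach skipped")
	default:
		fmt.Println("waiting on VolumeAttachment", attachID)
	}
}
```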
```diff
@@ -120,6 +130,16 @@ func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.
 		return "", err
 	}
 
+	skip, err := c.plugin.skipAttach(source.Driver)
+	if err != nil {
+		glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
+		return "", err
+	}
+	if skip {
+		glog.V(4).Infof(log("Driver is not attachable, skip waiting for attach"))
+		return "", nil
+	}
+
 	return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
 }
 
@@ -221,11 +241,22 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
 			glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
 			continue
 		}
+		skip, err := c.plugin.skipAttach(source.Driver)
+		if err != nil {
+			glog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err))
+		} else {
+			if skip {
+				// This volume is not attachable, pretend it's attached
+				attached[spec] = true
+				continue
+			}
+		}
 
 		attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
 		glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
 		attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
 		if err != nil {
+			attached[spec] = false
 			glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
 			continue
 		}
@@ -325,19 +356,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 
 	// Start MountDevice
 	nodeName := string(c.plugin.host.GetNodeName())
-	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
-
-	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
-	attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
-	if err != nil {
-		return err // This err already has enough context ("VolumeAttachment xyz not found")
-	}
-
-	if attachment == nil {
-		err = errors.New("no existing VolumeAttachment found")
-		return err
-	}
-	publishVolumeInfo := attachment.Status.AttachmentMetadata
+	publishVolumeInfo, err := c.plugin.getPublishVolumeInfo(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
 
 	nodeStageSecrets := map[string]string{}
 	if csiSource.NodeStageSecretRef != nil {
```
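MountDevice (and SetUpAt, later in this commit) no longer reads the VolumeAttachment directly; both go through the new csiPlugin.getPublishVolumeInfo helper defined further down in csi_plugin.go. Its contract, restated as a sketch:

```go
package csiexample

// describePublishInfo restates the result contract of
// csiPlugin.getPublishVolumeInfo (defined later in this diff).
func describePublishInfo(info map[string]string, err error) string {
	switch {
	case err != nil:
		return "VolumeAttachment lookup failed" // propagated to the caller
	case info == nil:
		// Attach was skipped for this driver; NodeStageVolume proceeds
		// without any publish context.
		return "no publish info (attach skipped)"
	default:
		return "publish info from VolumeAttachment.Status.AttachmentMetadata"
	}
}
```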
|
@ -24,19 +24,28 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
storage "k8s.io/api/storage/v1beta1"
|
storage "k8s.io/api/storage/v1beta1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/watch"
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
utiltesting "k8s.io/client-go/util/testing"
|
utiltesting "k8s.io/client-go/util/testing"
|
||||||
|
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
|
||||||
"k8s.io/kubernetes/pkg/volume"
|
"k8s.io/kubernetes/pkg/volume"
|
||||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bFalse = false
|
||||||
|
bTrue = true
|
||||||
|
)
|
||||||
|
|
||||||
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
|
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
|
||||||
return &storage.VolumeAttachment{
|
return &storage.VolumeAttachment{
|
||||||
ObjectMeta: meta.ObjectMeta{
|
ObjectMeta: meta.ObjectMeta{
|
||||||
@ -57,6 +66,40 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.RaceFreeFakeWatcher, attachID string, status storage.VolumeAttachmentStatus) {
|
||||||
|
ticker := time.NewTicker(10 * time.Millisecond)
|
||||||
|
var attach *storage.VolumeAttachment
|
||||||
|
var err error
|
||||||
|
defer ticker.Stop()
|
||||||
|
// wait for attachment to be saved
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
attach, err = client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
if apierrs.IsNotFound(err) {
|
||||||
|
<-ticker.C
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if attach != nil {
|
||||||
|
glog.Infof("stopping wait")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
glog.Infof("stopped wait")
|
||||||
|
|
||||||
|
if attach == nil {
|
||||||
|
t.Logf("attachment not found for id:%v", attachID)
|
||||||
|
} else {
|
||||||
|
attach.Status = status
|
||||||
|
_, err := client.StorageV1beta1().VolumeAttachments().Update(attach)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
watch.Modify(attach)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestAttacherAttach(t *testing.T) {
|
func TestAttacherAttach(t *testing.T) {
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
@ -120,8 +163,7 @@ func TestAttacherAttach(t *testing.T) {
|
|||||||
// attacher loop
|
// attacher loop
|
||||||
for i, tc := range testCases {
|
for i, tc := range testCases {
|
||||||
t.Logf("test case: %s", tc.name)
|
t.Logf("test case: %s", tc.name)
|
||||||
|
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
|
||||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
attacher, err := plug.NewAttacher()
|
attacher, err := plug.NewAttacher()
|
||||||
@ -146,42 +188,158 @@ func TestAttacherAttach(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}(tc.attachID, tc.nodeName, tc.shouldFail)
|
}(tc.attachID, tc.nodeName, tc.shouldFail)
|
||||||
|
|
||||||
// update attachment to avoid long waitForAttachment
|
var status storage.VolumeAttachmentStatus
|
||||||
ticker := time.NewTicker(10 * time.Millisecond)
|
if tc.injectAttacherError {
|
||||||
defer ticker.Stop()
|
status.Attached = false
|
||||||
// wait for attachment to be saved
|
status.AttachError = &storage.VolumeError{
|
||||||
var attach *storage.VolumeAttachment
|
Message: "attacher error",
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
|
|
||||||
if err != nil {
|
|
||||||
if apierrs.IsNotFound(err) {
|
|
||||||
<-ticker.C
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
t.Error(err)
|
|
||||||
}
|
}
|
||||||
if attach != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if attach == nil {
|
|
||||||
t.Logf("attachment not found for id:%v", tc.attachID)
|
|
||||||
} else {
|
} else {
|
||||||
if tc.injectAttacherError {
|
status.Attached = true
|
||||||
attach.Status.Attached = false
|
|
||||||
attach.Status.AttachError = &storage.VolumeError{
|
|
||||||
Message: "attacher error",
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
attach.Status.Attached = true
|
|
||||||
}
|
|
||||||
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
fakeWatcher.Modify(attach)
|
|
||||||
}
|
}
|
||||||
|
markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, tc.attachID, status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAttacherWithCSIDriver(t *testing.T) {
|
||||||
|
originalFeatures := utilfeature.DefaultFeatureGate.DeepCopy()
|
||||||
|
defer func() {
|
||||||
|
utilfeature.DefaultFeatureGate = originalFeatures
|
||||||
|
}()
|
||||||
|
err := utilfeature.DefaultFeatureGate.Set("CSISkipAttach=true")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to set CSISkipAttach=true: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
driver string
|
||||||
|
expectVolumeAttachment bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "CSIDriver not attachable",
|
||||||
|
driver: "not-attachable",
|
||||||
|
expectVolumeAttachment: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver is attachable",
|
||||||
|
driver: "attachable",
|
||||||
|
expectVolumeAttachment: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver.AttachRequired not set -> failure",
|
||||||
|
driver: "nil",
|
||||||
|
expectVolumeAttachment: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver does not exist not set -> failure",
|
||||||
|
driver: "unknown",
|
||||||
|
expectVolumeAttachment: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
fakeCSIClient := fakecsi.NewSimpleClientset(
|
||||||
|
getCSIDriver("not-attachable", nil, &bFalse),
|
||||||
|
getCSIDriver("attachable", nil, &bTrue),
|
||||||
|
getCSIDriver("nil", nil, nil),
|
||||||
|
)
|
||||||
|
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeCSIClient)
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
attacher, err := plug.NewAttacher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create new attacher: %v", err)
|
||||||
|
}
|
||||||
|
csiAttacher := attacher.(*csiAttacher)
|
||||||
|
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
|
||||||
|
|
||||||
|
expectedAttachID := getAttachmentName("test-vol", test.driver, "node")
|
||||||
|
status := storage.VolumeAttachmentStatus{
|
||||||
|
Attached: true,
|
||||||
|
}
|
||||||
|
if test.expectVolumeAttachment {
|
||||||
|
go markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, expectedAttachID, status)
|
||||||
|
}
|
||||||
|
attachID, err := csiAttacher.Attach(spec, types.NodeName("node"))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Attach() failed: %s", err)
|
||||||
|
}
|
||||||
|
if test.expectVolumeAttachment && attachID == "" {
|
||||||
|
t.Errorf("Epected attachID, got nothing")
|
||||||
|
}
|
||||||
|
if !test.expectVolumeAttachment && attachID != "" {
|
||||||
|
t.Errorf("Epected empty attachID, got %q", attachID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
|
||||||
|
originalFeatures := utilfeature.DefaultFeatureGate.DeepCopy()
|
||||||
|
defer func() {
|
||||||
|
utilfeature.DefaultFeatureGate = originalFeatures
|
||||||
|
}()
|
||||||
|
err := utilfeature.DefaultFeatureGate.Set("CSISkipAttach=true")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to set CSISkipAttach=true: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// In order to detect if the volume plugin would skip WaitForAttach for non-attachable drivers,
|
||||||
|
// we do not instantiate any VolumeAttachment. So if the plugin does not skip attach, WaitForVolumeAttachment
|
||||||
|
// will return an error that volume attachment was not found.
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
driver string
|
||||||
|
expectError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "CSIDriver not attachable -> success",
|
||||||
|
driver: "not-attachable",
|
||||||
|
expectError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver is attachable -> failure",
|
||||||
|
driver: "attachable",
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver.AttachRequired not set -> failure",
|
||||||
|
driver: "nil",
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CSIDriver does not exist not set -> failure",
|
||||||
|
driver: "unknown",
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
fakeCSIClient := fakecsi.NewSimpleClientset(
|
||||||
|
getCSIDriver("not-attachable", nil, &bFalse),
|
||||||
|
getCSIDriver("attachable", nil, &bTrue),
|
||||||
|
getCSIDriver("nil", nil, nil),
|
||||||
|
)
|
||||||
|
plug, tmpDir := newTestPlugin(t, nil, fakeCSIClient)
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
attacher, err := plug.NewAttacher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create new attacher: %v", err)
|
||||||
|
}
|
||||||
|
csiAttacher := attacher.(*csiAttacher)
|
||||||
|
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
|
||||||
|
_, err = csiAttacher.WaitForAttach(spec, "", nil, time.Second)
|
||||||
|
if err != nil && !test.expectError {
|
||||||
|
t.Errorf("Unexpected error: %s", err)
|
||||||
|
}
|
||||||
|
if err == nil && test.expectError {
|
||||||
|
t.Errorf("Expected error, got none")
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -237,7 +395,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, tc := range testCases {
|
for i, tc := range testCases {
|
||||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
attacher, err := plug.NewAttacher()
|
attacher, err := plug.NewAttacher()
|
||||||
@ -287,7 +445,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAttacherVolumesAreAttached(t *testing.T) {
|
func TestAttacherVolumesAreAttached(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
attacher, err := plug.NewAttacher()
|
attacher, err := plug.NewAttacher()
|
||||||
@ -374,7 +532,7 @@ func TestAttacherDetach(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Logf("running test: %v", tc.name)
|
t.Logf("running test: %v", tc.name)
|
||||||
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
|
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
if tc.reactor != nil {
|
if tc.reactor != nil {
|
||||||
client.PrependReactor("*", "*", tc.reactor)
|
client.PrependReactor("*", "*", tc.reactor)
|
||||||
@ -423,7 +581,7 @@ func TestAttacherDetach(t *testing.T) {
|
|||||||
func TestAttacherGetDeviceMountPath(t *testing.T) {
|
func TestAttacherGetDeviceMountPath(t *testing.T) {
|
||||||
// Setup
|
// Setup
|
||||||
// Create a new attacher
|
// Create a new attacher
|
||||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
attacher, err0 := plug.NewAttacher()
|
attacher, err0 := plug.NewAttacher()
|
||||||
if err0 != nil {
|
if err0 != nil {
|
||||||
@ -532,7 +690,7 @@ func TestAttacherMountDevice(t *testing.T) {
|
|||||||
|
|
||||||
// Setup
|
// Setup
|
||||||
// Create a new attacher
|
// Create a new attacher
|
||||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
attacher, err0 := plug.NewAttacher()
|
attacher, err0 := plug.NewAttacher()
|
||||||
if err0 != nil {
|
if err0 != nil {
|
||||||
@ -663,7 +821,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
|||||||
t.Logf("Running test case: %s", tc.testName)
|
t.Logf("Running test case: %s", tc.testName)
|
||||||
// Setup
|
// Setup
|
||||||
// Create a new attacher
|
// Create a new attacher
|
||||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
attacher, err0 := plug.NewAttacher()
|
attacher, err0 := plug.NewAttacher()
|
||||||
if err0 != nil {
|
if err0 != nil {
|
||||||
@ -749,7 +907,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// create a plugin mgr to load plugins and setup a fake client
|
// create a plugin mgr to load plugins and setup a fake client
|
||||||
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
|
func newTestWatchPlugin(t *testing.T, csiClient *fakecsi.Clientset) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
|
||||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("can't create temp dir: %v", err)
|
t.Fatalf("can't create temp dir: %v", err)
|
||||||
@ -759,10 +917,15 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
|
|||||||
fakeWatcher := watch.NewRaceFreeFake()
|
fakeWatcher := watch.NewRaceFreeFake()
|
||||||
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
|
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
|
||||||
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
|
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
|
||||||
host := volumetest.NewFakeVolumeHost(
|
if csiClient == nil {
|
||||||
|
csiClient = fakecsi.NewSimpleClientset()
|
||||||
|
}
|
||||||
|
host := volumetest.NewFakeVolumeHostWithCSINodeName(
|
||||||
tmpDir,
|
tmpDir,
|
||||||
fakeClient,
|
fakeClient,
|
||||||
|
csiClient,
|
||||||
nil,
|
nil,
|
||||||
|
"node",
|
||||||
)
|
)
|
||||||
plugMgr := &volume.VolumePluginMgr{}
|
plugMgr := &volume.VolumePluginMgr{}
|
||||||
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
||||||
@ -777,5 +940,12 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
|
|||||||
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Wait until the informer in CSI volume plugin has all CSIDrivers.
|
||||||
|
if csiPlug.csiDriverInformer.Informer().HasSynced() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return csiPlug, fakeWatcher, tmpDir, fakeClient
|
return csiPlug, fakeWatcher, tmpDir, fakeClient
|
||||||
}
|
}
|
||||||
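The bare for-loop above busy-spins until the informer cache syncs; it works in tests but burns a core if sync is slow. A gentler equivalent (sketch, assuming apimachinery's wait helpers; hasSynced stands in for csiPlug.csiDriverInformer.Informer().HasSynced):

```go
package csiexample

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForInformerSync is a CPU-friendlier version of the busy-wait loop in
// newTestWatchPlugin/newTestPlugin.
func waitForInformerSync(t *testing.T, hasSynced func() bool) {
	err := wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		return hasSynced(), nil
	})
	if err != nil {
		t.Fatalf("CSIDriver informer did not sync: %v", err)
	}
}
```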
|
@ -31,7 +31,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
|
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
// TODO (vladimirvivien) specName with slashes will not work
|
// TODO (vladimirvivien) specName with slashes will not work
|
||||||
@ -77,13 +77,14 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockMapperSetupDevice(t *testing.T) {
|
func TestBlockMapperSetupDevice(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
fakeClient := fakeclient.NewSimpleClientset()
|
fakeClient := fakeclient.NewSimpleClientset()
|
||||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
host := volumetest.NewFakeVolumeHostWithCSINodeName(
|
||||||
tmpDir,
|
tmpDir,
|
||||||
fakeClient,
|
fakeClient,
|
||||||
nil,
|
nil,
|
||||||
|
nil,
|
||||||
"fakeNode",
|
"fakeNode",
|
||||||
)
|
)
|
||||||
plug.host = host
|
plug.host = host
|
||||||
@ -134,13 +135,14 @@ func TestBlockMapperSetupDevice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockMapperMapDevice(t *testing.T) {
|
func TestBlockMapperMapDevice(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
fakeClient := fakeclient.NewSimpleClientset()
|
fakeClient := fakeclient.NewSimpleClientset()
|
||||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
host := volumetest.NewFakeVolumeHostWithCSINodeName(
|
||||||
tmpDir,
|
tmpDir,
|
||||||
fakeClient,
|
fakeClient,
|
||||||
nil,
|
nil,
|
||||||
|
nil,
|
||||||
"fakeNode",
|
"fakeNode",
|
||||||
)
|
)
|
||||||
plug.host = host
|
plug.host = host
|
||||||
@ -202,13 +204,14 @@ func TestBlockMapperMapDevice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockMapperTearDownDevice(t *testing.T) {
|
func TestBlockMapperTearDownDevice(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
fakeClient := fakeclient.NewSimpleClientset()
|
fakeClient := fakeclient.NewSimpleClientset()
|
||||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
host := volumetest.NewFakeVolumeHostWithCSINodeName(
|
||||||
tmpDir,
|
tmpDir,
|
||||||
fakeClient,
|
fakeClient,
|
||||||
nil,
|
nil,
|
||||||
|
nil,
|
||||||
"fakeNode",
|
"fakeNode",
|
||||||
)
|
)
|
||||||
plug.host = host
|
plug.host = host
|
||||||
|
@ -18,7 +18,6 @@ package csi
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@ -26,7 +25,6 @@ import (
|
|||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
|
||||||
api "k8s.io/api/core/v1"
|
api "k8s.io/api/core/v1"
|
||||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||||
@ -113,9 +111,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
csi := c.csiClient
|
csi := c.csiClient
|
||||||
nodeName := string(c.plugin.host.GetNodeName())
|
|
||||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
@ -134,20 +129,13 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||||
if c.volumeInfo == nil {
|
if c.volumeInfo == nil {
|
||||||
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
nodeName := string(c.plugin.host.GetNodeName())
|
||||||
|
c.volumeInfo, err = c.plugin.getPublishVolumeInfo(c.k8s, c.volumeID, c.driverName, nodeName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if attachment == nil {
|
|
||||||
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
|
|
||||||
return errors.New("no existing VolumeAttachment found")
|
|
||||||
}
|
|
||||||
c.volumeInfo = attachment.Status.AttachmentMetadata
|
|
||||||
}
|
}
|
||||||
|
|
||||||
attribs := csiSource.VolumeAttributes
|
attribs := csiSource.VolumeAttributes
|
||||||
|
@ -30,20 +30,23 @@ import (
|
|||||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||||
|
csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
|
||||||
"k8s.io/kubernetes/pkg/volume"
|
"k8s.io/kubernetes/pkg/volume"
|
||||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||||
"k8s.io/kubernetes/pkg/volume/util"
|
"k8s.io/kubernetes/pkg/volume/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
testDriver = "test-driver"
|
testDriver = "test-driver"
|
||||||
testVol = "vol-123"
|
testVol = "vol-123"
|
||||||
testns = "test-ns"
|
testns = "test-ns"
|
||||||
testPodUID = types.UID("test-pod")
|
testPod = "test-pod"
|
||||||
|
testPodUID = types.UID("test-pod")
|
||||||
|
testAccount = "test-service-account"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMounterGetPath(t *testing.T) {
|
func TestMounterGetPath(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
// TODO (vladimirvivien) specName with slashes will not work
|
// TODO (vladimirvivien) specName with slashes will not work
|
||||||
@ -86,13 +89,14 @@ func TestMounterGetPath(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMounterSetUp(t *testing.T) {
|
func TestMounterSetUp(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
fakeClient := fakeclient.NewSimpleClientset()
|
fakeClient := fakeclient.NewSimpleClientset()
|
||||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
host := volumetest.NewFakeVolumeHostWithCSINodeName(
|
||||||
tmpDir,
|
tmpDir,
|
||||||
fakeClient,
|
fakeClient,
|
||||||
nil,
|
nil,
|
||||||
|
nil,
|
||||||
"fakeNode",
|
"fakeNode",
|
||||||
)
|
)
|
||||||
plug.host = host
|
plug.host = host
|
||||||
@ -167,7 +171,7 @@ func TestMounterSetUp(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmounterTeardown(t *testing.T) {
|
func TestUnmounterTeardown(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||||
|
|
||||||
@ -216,7 +220,7 @@ func TestUnmounterTeardown(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSaveVolumeData(t *testing.T) {
|
func TestSaveVolumeData(t *testing.T) {
|
||||||
plug, tmpDir := newTestPlugin(t)
|
plug, tmpDir := newTestPlugin(t, nil, nil)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
@ -262,3 +266,16 @@ func TestSaveVolumeData(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getCSIDriver(name string, requiresPodInfo *bool, attachable *bool) *csiapi.CSIDriver {
|
||||||
|
podInfoMountVersion := "v1"
|
||||||
|
return &csiapi.CSIDriver{
|
||||||
|
ObjectMeta: meta.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Spec: csiapi.CSIDriverSpec{
|
||||||
|
PodInfoOnMountVersion: &podInfoMountVersion,
|
||||||
|
AttachRequired: attachable,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
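The *bool parameters keep AttachRequired tri-state: nil ("unset") must behave like true, so drivers without a CSIDriver object keep the legacy attach path. That is also why the attacher test file declares the addressable bFalse/bTrue package variables, since Go cannot take the address of a literal. A one-liner restating how the tri-state is read:

```go
package csiexample

import csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"

// attachIsRequired reads CSIDriver.Spec.AttachRequired the way
// csiPlugin.skipAttach does: only an explicit false opts out of attach.
func attachIsRequired(d *csiapi.CSIDriver) bool {
	return d.Spec.AttachRequired == nil || *d.Spec.AttachRequired
}
```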
|
@ -29,12 +29,15 @@ import (
|
|||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
api "k8s.io/api/core/v1"
|
api "k8s.io/api/core/v1"
|
||||||
|
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions"
|
csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions"
|
||||||
csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1"
|
csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1"
|
||||||
|
csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1"
|
||||||
"k8s.io/kubernetes/pkg/features"
|
"k8s.io/kubernetes/pkg/features"
|
||||||
"k8s.io/kubernetes/pkg/volume"
|
"k8s.io/kubernetes/pkg/volume"
|
||||||
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||||
@ -60,6 +63,7 @@ const (
|
|||||||
type csiPlugin struct {
|
type csiPlugin struct {
|
||||||
host volume.VolumeHost
|
host volume.VolumeHost
|
||||||
blockEnabled bool
|
blockEnabled bool
|
||||||
|
csiDriverLister csilister.CSIDriverLister
|
||||||
csiDriverInformer csiinformer.CSIDriverInformer
|
csiDriverInformer csiinformer.CSIDriverInformer
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -145,6 +149,7 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
|
|||||||
// Start informer for CSIDrivers.
|
// Start informer for CSIDrivers.
|
||||||
factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
|
factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
|
||||||
p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers()
|
p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers()
|
||||||
|
p.csiDriverLister = p.csiDriverInformer.Lister()
|
||||||
go factory.Start(wait.NeverStop)
|
go factory.Start(wait.NeverStop)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -487,3 +492,48 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
|
|||||||
|
|
||||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *csiPlugin) skipAttach(driver string) (bool, error) {
|
||||||
|
if !utilfeature.DefaultFeatureGate.Enabled(features.CSISkipAttach) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if p.csiDriverLister == nil {
|
||||||
|
return false, errors.New("CSIDriver lister does not exist")
|
||||||
|
}
|
||||||
|
csiDriver, err := p.csiDriverLister.Get(driver)
|
||||||
|
if err != nil {
|
||||||
|
if apierrs.IsNotFound(err) {
|
||||||
|
// Don't skip attach if CSIDriver does not exist
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if csiDriver.Spec.AttachRequired != nil && *csiDriver.Spec.AttachRequired == false {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *csiPlugin) getPublishVolumeInfo(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
|
||||||
|
skip, err := p.skipAttach(driver)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
attachID := getAttachmentName(handle, driver, nodeName)
|
||||||
|
|
||||||
|
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||||
|
attachment, err := client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err // This err already has enough context ("VolumeAttachment xyz not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
if attachment == nil {
|
||||||
|
err = errors.New("no existing VolumeAttachment found")
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return attachment.Status.AttachmentMetadata, nil
|
||||||
|
}
|
||||||
|
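skipAttach is the single decision point that the attacher, the mounter, and the RBAC hunks below all hang off. Collapsed into a pure function for review (illustrative paraphrase; the real method also returns errors when the lister is missing or the Get fails for reasons other than NotFound):

```go
package csiexample

// decideSkipAttach paraphrases csiPlugin.skipAttach above, error paths omitted.
func decideSkipAttach(gateEnabled, csiDriverFound bool, attachRequired *bool) bool {
	if !gateEnabled || !csiDriverFound {
		// Gate off, or no CSIDriver object registered under the driver's
		// name: fall back to the pre-existing behavior and attach.
		return false
	}
	// Only an explicit AttachRequired=false skips the attach.
	return attachRequired != nil && !*attachRequired
}
```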
pkg/volume/csi/csi_plugin_test.go:

```diff
@@ -30,12 +30,13 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	fakeclient "k8s.io/client-go/kubernetes/fake"
 	utiltesting "k8s.io/client-go/util/testing"
+	fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 )
 
 // create a plugin mgr to load plugins and setup a fake client
-func newTestPlugin(t *testing.T) (*csiPlugin, string) {
+func newTestPlugin(t *testing.T, client *fakeclient.Clientset, csiClient *fakecsi.Clientset) (*csiPlugin, string) {
 	err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
 	if err != nil {
 		t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
@@ -46,11 +47,18 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
 		t.Fatalf("can't create temp dir: %v", err)
 	}
 
-	fakeClient := fakeclient.NewSimpleClientset()
-	host := volumetest.NewFakeVolumeHost(
+	if client == nil {
+		client = fakeclient.NewSimpleClientset()
+	}
+	if csiClient == nil {
+		csiClient = fakecsi.NewSimpleClientset()
+	}
+	host := volumetest.NewFakeVolumeHostWithCSINodeName(
 		tmpDir,
-		fakeClient,
+		client,
+		csiClient,
 		nil,
+		"fakeNode",
 	)
 	plugMgr := &volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@@ -65,6 +73,13 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
 		t.Fatalf("cannot assert plugin to be type csiPlugin")
 	}
 
+	for {
+		// Wait until the informer in CSI volume plugin has all CSIDrivers.
+		if csiPlug.csiDriverInformer.Informer().HasSynced() {
+			break
+		}
+	}
+
 	return csiPlug, tmpDir
 }
 
@@ -92,7 +107,7 @@ func makeTestPV(name string, sizeGig int, driverName, volID string) *api.Persist
 }
 
 func TestPluginGetPluginName(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 	if plug.GetPluginName() != "kubernetes.io/csi" {
 		t.Errorf("unexpected plugin name %v", plug.GetPluginName())
@@ -100,7 +115,7 @@ func TestPluginGetPluginName(t *testing.T) {
 }
 
 func TestPluginGetVolumeName(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 	testCases := []struct {
 		name string
@@ -129,7 +144,7 @@ func TestPluginGetVolumeName(t *testing.T) {
 }
 
 func TestPluginCanSupport(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -141,7 +156,7 @@ func TestPluginCanSupport(t *testing.T) {
 }
 
 func TestPluginConstructVolumeSpec(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	testCases := []struct {
@@ -201,7 +216,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 }
 
 func TestPluginNewMounter(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -249,7 +264,7 @@ func TestPluginNewMounter(t *testing.T) {
 }
 
 func TestPluginNewUnmounter(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -294,7 +309,7 @@ func TestPluginNewUnmounter(t *testing.T) {
 }
 
 func TestPluginNewAttacher(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	attacher, err := plug.NewAttacher()
@@ -312,7 +327,7 @@ func TestPluginNewAttacher(t *testing.T) {
 }
 
 func TestPluginNewDetacher(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	detacher, err := plug.NewDetacher()
@@ -330,7 +345,7 @@ func TestPluginNewDetacher(t *testing.T) {
 }
 
 func TestPluginNewBlockMapper(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
@@ -375,7 +390,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
 }
 
 func TestPluginNewUnmapper(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -432,7 +447,7 @@ func TestPluginNewUnmapper(t *testing.T) {
 }
 
 func TestPluginConstructBlockVolumeSpec(t *testing.T) {
-	plug, tmpDir := newTestPlugin(t)
+	plug, tmpDir := newTestPlugin(t, nil, nil)
 	defer os.RemoveAll(tmpDir)
 
 	testCases := []struct {
```
|
@ -73,9 +73,10 @@ func NewFakeVolumeHostWithNodeLabels(rootDir string, kubeClient clientset.Interf
|
|||||||
return volHost
|
return volHost
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFakeVolumeHostWithNodeName(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string) *fakeVolumeHost {
|
func NewFakeVolumeHostWithCSINodeName(rootDir string, kubeClient clientset.Interface, csiClient csiclientset.Interface, plugins []VolumePlugin, nodeName string) *fakeVolumeHost {
|
||||||
volHost := newFakeVolumeHost(rootDir, kubeClient, plugins, nil, nil)
|
volHost := newFakeVolumeHost(rootDir, kubeClient, plugins, nil, nil)
|
||||||
volHost.nodeName = nodeName
|
volHost.nodeName = nodeName
|
||||||
|
volHost.csiClient = csiClient
|
||||||
return volHost
|
return volHost
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -73,6 +73,9 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
|
|||||||
|
|
||||||
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
||||||
role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
|
role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSISkipAttach) {
|
||||||
|
role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "watch", "list").Groups("csi.storage.k8s.io").Resources("csidrivers").RuleOrDie())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return role
|
return role
|
||||||
|
@ -159,6 +159,10 @@ func NodeRules() []rbacv1.PolicyRule {
|
|||||||
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
||||||
volAttachRule := rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
|
volAttachRule := rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
|
||||||
nodePolicyRules = append(nodePolicyRules, volAttachRule)
|
nodePolicyRules = append(nodePolicyRules, volAttachRule)
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSISkipAttach) {
|
||||||
|
csiDriverRule := rbacv1helpers.NewRule("get", "watch", "list").Groups("csi.storage.k8s.io").Resources("csidrivers").RuleOrDie()
|
||||||
|
nodePolicyRules = append(nodePolicyRules, csiDriverRule)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Node leases
|
// Node leases
|
||||||
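Both grants exist because the skip decision is made in two places: the attach/detach controller on the control plane and the kubelet volume manager on the node each watch CSIDriver objects through the informer added above. The granted rule, expressed with the public rbac/v1 types for reference:

```go
package csiexample

import rbacv1 "k8s.io/api/rbac/v1"

// csiDriversReadRule mirrors the rule added above for the controller role and
// for nodes when the CSISkipAttach gate is enabled.
var csiDriversReadRule = rbacv1.PolicyRule{
	Verbs:     []string{"get", "watch", "list"},
	APIGroups: []string{"csi.storage.k8s.io"},
	Resources: []string{"csidrivers"},
}
```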
|
@ -133,6 +133,7 @@ go_library(
|
|||||||
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
|
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
|
||||||
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
|
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
|
||||||
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
|
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
|
||||||
|
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
|
||||||
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
|
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
|
||||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||||
"//test/e2e/framework/metrics:go_default_library",
|
"//test/e2e/framework/metrics:go_default_library",
|
||||||
|
@ -42,6 +42,7 @@ import (
|
|||||||
"k8s.io/client-go/restmapper"
|
"k8s.io/client-go/restmapper"
|
||||||
scaleclient "k8s.io/client-go/scale"
|
scaleclient "k8s.io/client-go/scale"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
csi "k8s.io/csi-api/pkg/client/clientset/versioned"
|
||||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||||
@ -67,6 +68,7 @@ type Framework struct {
|
|||||||
|
|
||||||
ClientSet clientset.Interface
|
ClientSet clientset.Interface
|
||||||
KubemarkExternalClusterClientSet clientset.Interface
|
KubemarkExternalClusterClientSet clientset.Interface
|
||||||
|
CSIClientSet csi.Interface
|
||||||
|
|
||||||
InternalClientset *internalclientset.Clientset
|
InternalClientset *internalclientset.Clientset
|
||||||
AggregatorClient *aggregatorclient.Clientset
|
AggregatorClient *aggregatorclient.Clientset
|
||||||
@ -181,6 +183,11 @@ func (f *Framework) BeforeEach() {
|
|||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
// csi.storage.k8s.io is based on CRD, which is served only as JSON
|
||||||
|
jsonConfig := config
|
||||||
|
jsonConfig.ContentType = "application/json"
|
||||||
|
f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||||
// as they are required when creating a REST client.
|
// as they are required when creating a REST client.
|
||||||
|
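One subtlety in the hunk above: config is a *rest.Config, so jsonConfig := config copies the pointer and the ContentType assignment is visible to anything built from config afterwards. A defensive variant copies the struct instead (editorial sketch, not what the PR does):

```go
package csiexample

import (
	rest "k8s.io/client-go/rest"
	csi "k8s.io/csi-api/pkg/client/clientset/versioned"
)

// newCSIClientSet builds a clientset for the CRD-served csi.storage.k8s.io
// group. CRDs are served only as JSON, so the config must not negotiate
// protobuf; the shallow struct copy keeps the caller's config untouched.
func newCSIClientSet(config *rest.Config) (csi.Interface, error) {
	jsonConfig := *config // copy the struct, not the pointer
	jsonConfig.ContentType = "application/json"
	return csi.NewForConfig(&jsonConfig)
}
```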
test/e2e/storage/BUILD:

```diff
@@ -62,6 +62,8 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/generated:go_default_library",
```

test/e2e/storage/csi_volumes.go:

```diff
@@ -23,10 +23,18 @@ import (
 
 	"k8s.io/api/core/v1"
 
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
+	csi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
+	csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+
+	"crypto/sha256"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -55,6 +63,7 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 
 	var (
 		cs     clientset.Interface
+		csics  csiclient.Interface
 		ns     *v1.Namespace
 		node   v1.Node
 		config framework.VolumeTestConfig
@@ -62,6 +71,7 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 
 	BeforeEach(func() {
 		cs = f.ClientSet
+		csics = f.CSIClientSet
 		ns = f.Namespace
 		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 		node = nodes.Items[rand.Intn(len(nodes.Items))]
@@ -102,8 +112,183 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 			})
 		})
 	}
 
+	// Use [Serial], because there can be only one CSIDriver for csi-hostpath driver.
+	Context("CSI attach test using HostPath driver [Serial][Feature:CSISkipAttach]", func() {
+		var (
+			driver csiTestDriver
+		)
+		BeforeEach(func() {
+			driver = initCSIHostpath(f, config)
+			driver.createCSIDriver()
+		})
+
+		AfterEach(func() {
+			driver.cleanupCSIDriver()
+		})
+
+		tests := []struct {
+			name                   string
+			driverAttachable       bool
+			driverExists           bool
+			expectVolumeAttachment bool
+		}{
+			{
+				name:                   "non-attachable volume does not need VolumeAttachment",
+				driverAttachable:       false,
+				driverExists:           true,
+				expectVolumeAttachment: false,
+			},
+			{
+				name:                   "attachable volume needs VolumeAttachment",
+				driverAttachable:       true,
+				driverExists:           true,
+				expectVolumeAttachment: true,
+			},
+			{
+				name:                   "volume with no CSI driver needs VolumeAttachment",
+				driverExists:           false,
+				expectVolumeAttachment: true,
+			},
+		}
+
+		for _, t := range tests {
+			test := t
+			It(test.name, func() {
+				if test.driverExists {
+					driver := createCSIDriver(csics, test.driverAttachable)
+					if driver != nil {
+						defer csics.CsiV1alpha1().CSIDrivers().Delete(driver.Name, nil)
+					}
+				}
+
+				By("Creating pod")
+				t := driver.createStorageClassTest(node)
+				class, claim, pod := startPausePod(cs, t, ns.Name)
+				if class != nil {
+					defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
+				}
+				if claim != nil {
+					defer cs.CoreV1().PersistentVolumeClaims(ns.Name).Delete(claim.Name, nil)
+				}
+				if pod != nil {
+					// Fully delete (=unmount) the pod before deleting CSI driver
+					defer framework.DeletePodWithWait(f, cs, pod)
+				}
+				if pod == nil {
+					return
+				}
+
+				err := framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
+				framework.ExpectNoError(err, "Failed to start pod: %v", err)
+
+				By("Checking if VolumeAttachment was created for the pod")
+				// Check that VolumeAttachment does not exist
+				handle := getVolumeHandle(cs, claim)
+				attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, t.provisioner, node.Name)))
+				attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
+				_, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
+				if err != nil {
+					if errors.IsNotFound(err) {
+						if test.expectVolumeAttachment {
+							framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
+						}
+					} else {
+						framework.ExpectNoError(err, "Failed to find VolumeAttachment")
+					}
+				}
+				if !test.expectVolumeAttachment {
+					Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
+				}
+			})
+		}
+	})
 })
 
+func createCSIDriver(csics csiclient.Interface, attachable bool) *csi.CSIDriver {
+	By("Creating CSIDriver instance")
+	driver := &csi.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "csi-hostpath",
+		},
+		Spec: csi.CSIDriverSpec{
+			AttachRequired: &attachable,
+		},
+	}
+	driver, err := csics.CsiV1alpha1().CSIDrivers().Create(driver)
+	framework.ExpectNoError(err, "Failed to create CSIDriver: %v", err)
+	return driver
+}
+
+func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
+	// re-get the claim to get the latest state with the bound volume
+	claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+	if err != nil {
+		framework.ExpectNoError(err, "Cannot get PVC")
+		return ""
+	}
+	pvName := claim.Spec.VolumeName
+	pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+	if err != nil {
+		framework.ExpectNoError(err, "Cannot get PV")
+		return ""
+	}
+	if pv.Spec.CSI == nil {
+		Expect(pv.Spec.CSI).NotTo(BeNil())
+		return ""
+	}
+	return pv.Spec.CSI.VolumeHandle
+}
+
+func startPausePod(cs clientset.Interface, t storageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+	class := newStorageClass(t, ns, "")
+	class, err := cs.StorageV1().StorageClasses().Create(class)
+	framework.ExpectNoError(err, "Failed to create class: %v", err)
+	claim := newClaim(t, ns, "")
+	claim.Spec.StorageClassName = &class.Name
+	claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim)
+	framework.ExpectNoError(err, "Failed to create claim: %v", err)
+
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "pvc-volume-tester-",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "volume-tester",
+					Image: imageutils.GetE2EImage(imageutils.Pause),
+					VolumeMounts: []v1.VolumeMount{
+						{
+							Name:      "my-volume",
+							MountPath: "/mnt/test",
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+			Volumes: []v1.Volume{
+				{
+					Name: "my-volume",
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: claim.Name,
+							ReadOnly:  false,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	if len(t.nodeName) != 0 {
+		pod.Spec.NodeName = t.nodeName
+	}
+	pod, err = cs.CoreV1().Pods(ns).Create(pod)
+	framework.ExpectNoError(err, "Failed to create pod: %v", err)
+	return class, claim, pod
+}
+
 type hostpathCSIDriver struct {
 	combinedClusterRoleNames []string
 	serviceAccount           *v1.ServiceAccount
```
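The e2e test predicts the VolumeAttachment object's name rather than listing attachments, because the name is deterministic. A standalone restatement of the scheme used above (matching getAttachmentName in pkg/volume/csi), with placeholder inputs:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// attachmentName reproduces the VolumeAttachment naming scheme the test
// expects: "csi-" + hex(sha256(volumeHandle + driverName + nodeName)).
func attachmentName(volumeHandle, driverName, nodeName string) string {
	hash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volumeHandle, driverName, nodeName)))
	return fmt.Sprintf("csi-%x", hash)
}

func main() {
	fmt.Println(attachmentName("test-vol", "csi-hostpath", "node-1")) // placeholder values
}
```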