Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Move volume helpers to "k8s.io/component-helpers/storage/volume".
This patch aims to simplify decoupling "pkg/scheduler/framework/plugins" from internal "k8s.io/kubernetes" packages. More details are in issue #89930 and PR #102953.

The following helpers moved from the "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" package to the "k8s.io/component-helpers/storage/volume" package:
- IsDelayBindingMode
- GetBindVolumeToClaim
- IsVolumeBoundToClaim
- FindMatchingVolume
- CheckVolumeModeMismatches
- CheckAccessModes
- GetVolumeNodeAffinity

"CheckNodeAffinity" also moved from the "k8s.io/kubernetes/pkg/volume/util" package to "k8s.io/component-helpers/storage/volume" to prevent a diamond dependency conflict.

Signed-off-by: Konstantin Misyutin <konstantin.misyutin@huawei.com>
This commit is contained in: parent 1007f4974d, commit 1d7cefe9c4
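The diff below mechanically repoints callers at the new package. As a quick orientation, here is a minimal, illustrative Go sketch (not part of this commit) of how downstream code consumes the relocated helpers after the move; the PV object and label values are made up for the example.

// Minimal sketch: callers that previously imported
// "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
// now pull the helpers from component-helpers instead.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pvutil "k8s.io/component-helpers/storage/volume"
)

func main() {
	// Example-only PV; "example-pv" and "fast" are placeholder values.
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
		Spec:       v1.PersistentVolumeSpec{StorageClassName: "fast"},
	}

	// GetPersistentVolumeClass and CheckNodeAffinity now live in the new package.
	fmt.Println(pvutil.GetPersistentVolumeClass(pv))
	if err := pvutil.CheckNodeAffinity(pv, map[string]string{"key1": "value1"}); err != nil {
		fmt.Println("affinity mismatch:", err)
	}
}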
@@ -22,7 +22,7 @@ import (
 v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
+pvutil "k8s.io/component-helpers/storage/volume"
 )

 // Test single call to syncClaim and syncVolume methods.
@@ -20,10 +20,10 @@ import (
 "errors"
 "testing"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
+pvutil "k8s.io/component-helpers/storage/volume"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 )

 // Test single call to syncVolume, expecting recycling to happen.
@@ -41,9 +41,9 @@ import (
 storagelisters "k8s.io/client-go/listers/storage/v1"
 "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/kubernetes/pkg/controller"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-"k8s.io/kubernetes/pkg/volume"
+vol "k8s.io/kubernetes/pkg/volume"
 "k8s.io/kubernetes/pkg/volume/util/recyclerclient"
@@ -20,10 +20,10 @@ import (
 "fmt"
 "sort"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/client-go/tools/cache"
+pvutil "k8s.io/component-helpers/storage/volume"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -25,7 +25,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/kubernetes/scheme"
 ref "k8s.io/client-go/tools/reference"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/kubernetes/pkg/volume/util"
 )

@ -1377,132 +1377,6 @@ func TestBestMatchDelayed(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindMatchVolumeWithNode(t *testing.T) {
|
||||
volumes := createTestVolumes()
|
||||
node1 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value1"},
|
||||
},
|
||||
}
|
||||
node2 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value2"},
|
||||
},
|
||||
}
|
||||
node3 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value3"},
|
||||
},
|
||||
}
|
||||
node4 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value4"},
|
||||
},
|
||||
}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
expectedMatch string
|
||||
claim *v1.PersistentVolumeClaim
|
||||
node *v1.Node
|
||||
excludedVolumes map[string]*v1.PersistentVolume
|
||||
}{
|
||||
"success-match": {
|
||||
expectedMatch: "affinity-pv",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
}),
|
||||
node: node1,
|
||||
},
|
||||
"success-prebound": {
|
||||
expectedMatch: "affinity-prebound",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
pvc.Name = "claim02"
|
||||
}),
|
||||
node: node1,
|
||||
},
|
||||
"success-exclusion": {
|
||||
expectedMatch: "affinity-pv2",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
}),
|
||||
node: node1,
|
||||
excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil},
|
||||
},
|
||||
"fail-exclusion": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
}),
|
||||
node: node1,
|
||||
excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil, "affinity002": nil},
|
||||
},
|
||||
"fail-accessmode": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
}),
|
||||
node: node1,
|
||||
},
|
||||
"fail-nodeaffinity": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
}),
|
||||
node: node2,
|
||||
},
|
||||
"fail-prebound-node-affinity": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
pvc.Name = "claim02"
|
||||
}),
|
||||
node: node3,
|
||||
},
|
||||
"fail-nonavaliable": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
pvc.Name = "claim04"
|
||||
}),
|
||||
node: node4,
|
||||
},
|
||||
"success-bad-and-good-node-affinity": {
|
||||
expectedMatch: "affinity-pv3",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
pvc.Name = "claim03"
|
||||
}),
|
||||
node: node3,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
volume, err := pvutil.FindMatchingVolume(scenario.claim, volumes, scenario.node, scenario.excludedVolumes, true)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error matching volume by claim: %v", err)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume == nil {
|
||||
t.Errorf("Expected match but received nil volume for scenario: %s", name)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch {
|
||||
t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name)
|
||||
}
|
||||
if len(scenario.expectedMatch) == 0 && volume != nil {
|
||||
t.Errorf("Unexpected match for scenario: %s, matched with %s instead", name, volume.UID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAccessModes(t *testing.T) {
|
||||
volume := &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
|
@@ -27,9 +27,9 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/cache"
+pvutil "k8s.io/component-helpers/storage/volume"
 api "k8s.io/kubernetes/pkg/apis/core"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 )

 var class1Parameters = map[string]string{
@@ -43,11 +43,10 @@ import (
 "k8s.io/client-go/util/workqueue"
 cloudprovider "k8s.io/cloud-provider"
 volerr "k8s.io/cloud-provider/volume/errors"
-storagehelpers "k8s.io/component-helpers/storage/volume"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/kubernetes/pkg/controller/volume/common"
 "k8s.io/kubernetes/pkg/controller/volume/events"
 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 "k8s.io/kubernetes/pkg/util/goroutinemap"
 "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@@ -287,8 +286,8 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
 return fmt.Errorf("requested PV is too small")
 }

-requestedClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
-if storagehelpers.GetPersistentVolumeClass(volume) != requestedClass {
+requestedClass := pvutil.GetPersistentVolumeClaimClass(claim)
+if pvutil.GetPersistentVolumeClass(volume) != requestedClass {
 return fmt.Errorf("storageClassName does not match")
 }

@@ -355,7 +354,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl
 if err = ctrl.emitEventForUnboundDelayBindingClaim(claim); err != nil {
 return err
 }
-case storagehelpers.GetPersistentVolumeClaimClass(claim) != "":
+case pvutil.GetPersistentVolumeClaimClass(claim) != "":
 if err = ctrl.provisionClaim(ctx, claim); err != nil {
 return err
 }
@@ -1526,7 +1525,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 claim *v1.PersistentVolumeClaim,
 plugin vol.ProvisionableVolumePlugin,
 storageClass *storage.StorageClass) (string, error) {
-claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
+claimClass := pvutil.GetPersistentVolumeClaimClass(claim)
 klog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)

 // called from provisionClaim(), in this case, plugin MUST NOT be nil
@@ -1736,7 +1735,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal(
 ctx context.Context,
 claim *v1.PersistentVolumeClaim,
 storageClass *storage.StorageClass) (string, error) {
-claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
+claimClass := pvutil.GetPersistentVolumeClaimClass(claim)
 klog.V(4).Infof("provisionClaimOperationExternal [%s] started, class: %q", claimToClaimKey(claim), claimClass)
 // Set provisionerName to external provisioner name by setClaimProvisioner
 var err error
@@ -1834,7 +1833,7 @@ func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.Pers
 func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *v1.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *storage.StorageClass, error) {
 // provisionClaim() which leads here is never called with claimClass=="", we
 // can save some checks.
-claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
+claimClass := pvutil.GetPersistentVolumeClaimClass(claim)
 class, err := ctrl.classLister.Get(claimClass)
 if err != nil {
 return nil, nil, err
@@ -1902,7 +1901,7 @@ func (ctrl *PersistentVolumeController) getProvisionerNameFromVolume(volume *v1.
 // the AnnDynamicallyProvisioned annotation value, use the storageClass's Provisioner
 // field to avoid explosion of the metric in the cases like local storage provisioner
 // tagging a volume with arbitrary provisioner names
-storageClass := storagehelpers.GetPersistentVolumeClass(volume)
+storageClass := pvutil.GetPersistentVolumeClass(volume)
 class, err := ctrl.classLister.Get(storageClass)
 if err != nil {
 return "N/A"
@@ -40,11 +40,11 @@ import (
 "k8s.io/client-go/tools/record"
 "k8s.io/client-go/util/workqueue"
 cloudprovider "k8s.io/cloud-provider"
+pvutil "k8s.io/component-helpers/storage/volume"
 csitrans "k8s.io/csi-translation-lib"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/volume/common"
 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 "k8s.io/kubernetes/pkg/features"
 proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 "k8s.io/kubernetes/pkg/util/goroutinemap"
@ -36,23 +36,16 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/component-base/featuregate"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
pvutil "k8s.io/component-helpers/storage/volume"
|
||||
csitrans "k8s.io/csi-translation-lib"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
|
||||
pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/volume/csimigration"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
var (
|
||||
classNotHere = "not-here"
|
||||
classNoMode = "no-mode"
|
||||
classImmediateMode = "immediate-mode"
|
||||
classWaitMode = "wait-mode"
|
||||
)
|
||||
|
||||
// Test the real controller methods (add/update/delete claim/volume) with
|
||||
// a fake API server.
|
||||
// There is no controller API to 'initiate syncAll now', therefore these tests
|
||||
@ -469,19 +462,6 @@ func TestControllerCacheParsingError(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: scName,
|
||||
},
|
||||
}
|
||||
|
||||
return claim
|
||||
}
|
||||
|
||||
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
|
||||
return &storagev1.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -491,69 +471,6 @@ func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storage
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelayBindingMode(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
shouldDelay bool
|
||||
shouldFail bool
|
||||
}{
|
||||
"nil-class": {
|
||||
pvc: makePVCClass(nil),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"class-not-found": {
|
||||
pvc: makePVCClass(&classNotHere),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"no-mode-class": {
|
||||
pvc: makePVCClass(&classNoMode),
|
||||
shouldDelay: false,
|
||||
shouldFail: true,
|
||||
},
|
||||
"immediate-mode-class": {
|
||||
pvc: makePVCClass(&classImmediateMode),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"wait-mode-class": {
|
||||
pvc: makePVCClass(&classWaitMode),
|
||||
shouldDelay: true,
|
||||
},
|
||||
}
|
||||
|
||||
classes := []*storagev1.StorageClass{
|
||||
makeStorageClass(classNoMode, nil),
|
||||
makeStorageClass(classImmediateMode, &modeImmediate),
|
||||
makeStorageClass(classWaitMode, &modeWait),
|
||||
}
|
||||
|
||||
client := &fake.Clientset{}
|
||||
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
|
||||
classInformer := informerFactory.Storage().V1().StorageClasses()
|
||||
ctrl := &PersistentVolumeController{
|
||||
classLister: classInformer.Lister(),
|
||||
translator: csitrans.New(),
|
||||
}
|
||||
|
||||
for _, class := range classes {
|
||||
if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
|
||||
t.Fatalf("Failed to add storage class %q: %v", class.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
shouldDelay, err := pvutil.IsDelayBindingMode(test.pvc, ctrl.classLister)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
if err == nil && test.shouldFail {
|
||||
t.Errorf("Test %q returned success, expected error", name)
|
||||
}
|
||||
if shouldDelay != test.shouldDelay {
|
||||
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnnealMigrationAnnotations(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
|
||||
|
||||
|
@@ -20,11 +20,11 @@ import (
 "errors"
 "testing"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+pvutil "k8s.io/component-helpers/storage/volume"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 )

 // Test single call to syncVolume, expecting recycling to happen.
@@ -22,7 +22,7 @@ import (

 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
+pvutil "k8s.io/component-helpers/storage/volume"
 )

 func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
@@ -41,15 +41,13 @@ import (
 storagelisters "k8s.io/client-go/listers/storage/v1"
 storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
 "k8s.io/component-helpers/storage/ephemeral"
-storagehelpers "k8s.io/component-helpers/storage/volume"
+pvutil "k8s.io/component-helpers/storage/volume"
 csitrans "k8s.io/csi-translation-lib"
 csiplugins "k8s.io/csi-translation-lib/plugins"
 "k8s.io/klog/v2"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
-volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

 // ConflictReason is used for the special strings which explain why
@@ -608,7 +606,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
 }

 // Check PV's node affinity (the node might not have the proper label)
-if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil {
+if err := pvutil.CheckNodeAffinity(pv, node.Labels); err != nil {
 return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
 }

@@ -666,7 +664,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
 return false, err
 }

-if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil {
+if err := pvutil.CheckNodeAffinity(pv, node.Labels); err != nil {
 return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
 }
 }
@@ -806,7 +804,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
 return false, true, err
 }

-err = volumeutil.CheckNodeAffinity(pv, node.Labels)
+err = pvutil.CheckNodeAffinity(pv, node.Labels)
 if err != nil {
 klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
 return false, true, nil
@@ -830,7 +828,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi

 for _, pvc := range claimsToBind {
 // Get storage class name from each PVC
-storageClassName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
+storageClassName := pvutil.GetPersistentVolumeClaimClass(pvc)
 allPVs := b.pvCache.ListPVs(storageClassName)

 // Find a matching PV
@@ -868,7 +866,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
 // fails or we encounter an error.
 for _, claim := range claimsToProvision {
 pvcName := getPVCName(claim)
-className := storagehelpers.GetPersistentVolumeClaimClass(claim)
+className := pvutil.GetPersistentVolumeClaimClass(claim)
 if className == "" {
 return false, false, nil, fmt.Errorf("no class for claim %q", pvcName)
 }
@@ -43,10 +43,10 @@ import (
 "k8s.io/client-go/kubernetes/fake"
 k8stesting "k8s.io/client-go/testing"
 featuregatetesting "k8s.io/component-base/featuregate/testing"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/controller"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 "k8s.io/kubernetes/pkg/features"
 )

@@ -22,7 +22,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/utils/pointer"
 )

@@ -46,7 +46,7 @@ import (
 clienttesting "k8s.io/client-go/testing"
 clientcache "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/events"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
+pvutil "k8s.io/component-helpers/storage/volume"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 "k8s.io/kubernetes/pkg/scheduler/framework"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
@@ -35,6 +35,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/record"
 volerr "k8s.io/cloud-provider/volume/errors"
+pvutil "k8s.io/component-helpers/storage/volume"
 csitrans "k8s.io/csi-translation-lib"
 "k8s.io/klog/v2"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -2263,7 +2264,7 @@ func checkNodeAffinity(og *operationGenerator, volumeToMount VolumeToMount) erro
 if err != nil {
 return err
 }
-err = util.CheckNodeAffinity(pv, nodeLabels)
+err = pvutil.CheckNodeAffinity(pv, nodeLabels)
 if err != nil {
 return err
 }
@@ -36,7 +36,6 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/component-helpers/scheduling/corev1"
-storagehelpers "k8s.io/component-helpers/storage/volume"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -164,30 +163,6 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume)
 return class, nil
 }

-// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
-// This ensures that we don't mount a volume that doesn't belong to this node
-func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
-return checkVolumeNodeAffinity(pv, &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}})
-}
-
-func checkVolumeNodeAffinity(pv *v1.PersistentVolume, node *v1.Node) error {
-if pv.Spec.NodeAffinity == nil {
-return nil
-}
-
-if pv.Spec.NodeAffinity.Required != nil {
-terms := pv.Spec.NodeAffinity.Required
-klog.V(10).Infof("Match for Required node selector terms %+v", terms)
-if matches, err := corev1.MatchNodeSelectorTerms(node, terms); err != nil {
-return err
-} else if !matches {
-return fmt.Errorf("no matching NodeSelectorTerms")
-}
-}
-
-return nil
-}
-
 // LoadPodFromFile will read, decode, and return a Pod from a file.
 func LoadPodFromFile(filePath string) (*v1.Pod, error) {
 if filePath == "" {
@ -37,199 +37,6 @@ import (
|
||||
utilptr "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
var nodeLabels = map[string]string{
|
||||
"test-key1": "test-value1",
|
||||
"test-key2": "test-value2",
|
||||
}
|
||||
|
||||
func TestCheckVolumeNodeAffinity(t *testing.T) {
|
||||
type affinityTest struct {
|
||||
name string
|
||||
expectSuccess bool
|
||||
pv *v1.PersistentVolume
|
||||
}
|
||||
|
||||
cases := []affinityTest{
|
||||
{
|
||||
name: "valid-nil",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, nil),
|
||||
},
|
||||
{
|
||||
name: "valid-no-constraints",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{}),
|
||||
},
|
||||
{
|
||||
name: "select-nothing",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{}}),
|
||||
},
|
||||
{
|
||||
name: "select-nothing-empty-terms",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "valid-multiple-terms",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "valid-multiple-match-expressions",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-match-expressions-key",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-match-expressions-values",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value3", "test-value4"},
|
||||
},
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-terms",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
err := CheckNodeAffinity(c.pv, nodeLabels)
|
||||
|
||||
if err != nil && c.expectSuccess {
|
||||
t.Errorf("CheckTopology %v returned error: %v", c.name, err)
|
||||
}
|
||||
if err == nil && !c.expectSuccess {
|
||||
t.Errorf("CheckTopology %v returned success, expected error", c.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
|
||||
objMeta := metav1.ObjectMeta{Name: "test-constraints"}
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: objMeta,
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
NodeAffinity: affinity,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadPodFromFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@@ -30,10 +30,10 @@ import (
 cloudprovider "k8s.io/cloud-provider"
 cloudvolume "k8s.io/cloud-provider/volume"
 volumehelpers "k8s.io/cloud-provider/volume/helpers"
+persistentvolume "k8s.io/component-helpers/storage/volume"
 "k8s.io/klog/v2"
 api "k8s.io/kubernetes/pkg/apis/core"
 k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
-persistentvolume "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
 )

@@ -30,8 +30,8 @@ import (
 "k8s.io/apiserver/pkg/admission"
 admissiontesting "k8s.io/apiserver/pkg/admission/testing"
 cloudprovider "k8s.io/cloud-provider"
+persistentvolume "k8s.io/component-helpers/storage/volume"
 api "k8s.io/kubernetes/pkg/apis/core"
-persistentvolume "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 )

 type mockVolumes struct {
staging/src/k8s.io/component-helpers/go.sum (generated, 1 line changed)
@@ -157,6 +157,7 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -16,7 +16,13 @@ limitations under the License.

 package volume

-import v1 "k8s.io/api/core/v1"
+import (
+"fmt"
+
+v1 "k8s.io/api/core/v1"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/component-helpers/scheduling/corev1"
+)

 // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
 // requested, it returns "".
@@ -42,3 +48,26 @@ func GetPersistentVolumeClass(volume *v1.PersistentVolume) string {

 return volume.Spec.StorageClassName
 }
+
+// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
+// This ensures that we don't mount a volume that doesn't belong to this node
+func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
+return checkVolumeNodeAffinity(pv, &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}})
+}
+
+func checkVolumeNodeAffinity(pv *v1.PersistentVolume, node *v1.Node) error {
+if pv.Spec.NodeAffinity == nil {
+return nil
+}
+
+if pv.Spec.NodeAffinity.Required != nil {
+terms := pv.Spec.NodeAffinity.Required
+if matches, err := corev1.MatchNodeSelectorTerms(node, terms); err != nil {
+return err
+} else if !matches {
+return fmt.Errorf("no matching NodeSelectorTerms")
+}
+}
+
+return nil
+}
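A small usage sketch (illustration only, not part of the diff): calling the relocated CheckNodeAffinity against a PV that requires a node label. The zone key and values are placeholder assumptions.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	pvutil "k8s.io/component-helpers/storage/volume"
)

func main() {
	// PV that may only be mounted on nodes in "zone-a" (example values).
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      "topology.kubernetes.io/zone",
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{"zone-a"},
						}},
					}},
				},
			},
		},
	}

	// nil means the node's labels satisfy the PV's required node affinity.
	fmt.Println(pvutil.CheckNodeAffinity(pv, map[string]string{"topology.kubernetes.io/zone": "zone-a"}))
	// A mismatching label returns a "no matching NodeSelectorTerms" error.
	fmt.Println(pvutil.CheckNodeAffinity(pv, map[string]string{"topology.kubernetes.io/zone": "zone-b"}))
}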
@ -0,0 +1,216 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package volume
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var nodeLabels = map[string]string{
|
||||
"test-key1": "test-value1",
|
||||
"test-key2": "test-value2",
|
||||
}
|
||||
|
||||
func TestCheckVolumeNodeAffinity(t *testing.T) {
|
||||
type affinityTest struct {
|
||||
name string
|
||||
expectSuccess bool
|
||||
pv *v1.PersistentVolume
|
||||
}
|
||||
|
||||
cases := []affinityTest{
|
||||
{
|
||||
name: "valid-nil",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, nil),
|
||||
},
|
||||
{
|
||||
name: "valid-no-constraints",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{}),
|
||||
},
|
||||
{
|
||||
name: "select-nothing",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{}}),
|
||||
},
|
||||
{
|
||||
name: "select-nothing-empty-terms",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "valid-multiple-terms",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "valid-multiple-match-expressions",
|
||||
expectSuccess: true,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-match-expressions-key",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-match-expressions-values",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value3", "test-value4"},
|
||||
},
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: "invalid-multiple-terms",
|
||||
expectSuccess: false,
|
||||
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value1", "test-value3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "test-key2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test-value0", "test-value1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
err := CheckNodeAffinity(c.pv, nodeLabels)
|
||||
|
||||
if err != nil && c.expectSuccess {
|
||||
t.Errorf("CheckTopology %v returned error: %v", c.name, err)
|
||||
}
|
||||
if err == nil && !c.expectSuccess {
|
||||
t.Errorf("CheckTopology %v returned success, expected error", c.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-constraints"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
NodeAffinity: affinity,
|
||||
},
|
||||
}
|
||||
}
|
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package persistentvolume
+package volume

 import (
 "fmt"
@@ -28,8 +28,6 @@ import (
 "k8s.io/client-go/kubernetes/scheme"
 storagelisters "k8s.io/client-go/listers/storage/v1"
 "k8s.io/client-go/tools/reference"
-storagehelpers "k8s.io/component-helpers/storage/volume"
-volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

 const (
@@ -95,7 +93,7 @@ func IsDelayBindingProvisioning(claim *v1.PersistentVolumeClaim) bool {

 // IsDelayBindingMode checks if claim is in delay binding mode.
 func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
-className := storagehelpers.GetPersistentVolumeClaimClass(claim)
+className := GetPersistentVolumeClaimClass(claim)
 if className == "" {
 return false, nil
 }
@@ -194,7 +192,7 @@ func FindMatchingVolume(
 var smallestVolume *v1.PersistentVolume
 var smallestVolumeQty resource.Quantity
 requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-requestedClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
+requestedClass := GetPersistentVolumeClaimClass(claim)

 var selector labels.Selector
 if claim.Spec.Selector != nil {
@@ -238,9 +236,9 @@ func FindMatchingVolume(
 if node != nil {
 // Scheduler path, check that the PV NodeAffinity
 // is satisfied by the node
-// volumeutil.CheckNodeAffinity is the most expensive call in this loop.
+// CheckNodeAffinity is the most expensive call in this loop.
 // We should check cheaper conditions first or consider optimizing this function.
-err := volumeutil.CheckNodeAffinity(volume, node.Labels)
+err := CheckNodeAffinity(volume, node.Labels)
 if err != nil {
 nodeAffinityValid = false
 }
@@ -278,7 +276,7 @@ func FindMatchingVolume(
 } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
 continue
 }
-if storagehelpers.GetPersistentVolumeClass(volume) != requestedClass {
+if GetPersistentVolumeClass(volume) != requestedClass {
 continue
 }
 if !nodeAffinityValid {
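For context, a hedged sketch of calling the now package-local FindMatchingVolume directly. The call shape mirrors the test usage in this diff (claim, volumes, node, excludedVolumes, delayBinding); the PV and PVC objects below are invented for illustration and the expected output is an assumption, not a documented guarantee.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pvutil "k8s.io/component-helpers/storage/volume"
)

func main() {
	// An available 10Gi PV (example data only).
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-1", UID: "pv-1"},
		Spec: v1.PersistentVolumeSpec{
			Capacity:    v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		},
		Status: v1.PersistentVolumeStatus{Phase: v1.VolumeAvailable},
	}

	// A claim requesting 5Gi with a compatible access mode.
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "claim-1", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("5Gi")},
			},
		},
	}

	// No node filter, no exclusions, delayBinding=false: the smallest matching PV is returned.
	match, err := pvutil.FindMatchingVolume(claim, []*v1.PersistentVolume{pv}, nil, nil, false)
	fmt.Println(match != nil, err)
}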
@ -0,0 +1,300 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package volume
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
var (
|
||||
classNotHere = "not-here"
|
||||
classNoMode = "no-mode"
|
||||
classImmediateMode = "immediate-mode"
|
||||
classWaitMode = "wait-mode"
|
||||
|
||||
modeImmediate = storagev1.VolumeBindingImmediate
|
||||
modeWait = storagev1.VolumeBindingWaitForFirstConsumer
|
||||
)
|
||||
|
||||
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: scName,
|
||||
},
|
||||
}
|
||||
|
||||
return claim
|
||||
}
|
||||
|
||||
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
|
||||
return &storagev1.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scName,
|
||||
},
|
||||
VolumeBindingMode: mode,
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelayBindingMode(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
shouldDelay bool
|
||||
shouldFail bool
|
||||
}{
|
||||
"nil-class": {
|
||||
pvc: makePVCClass(nil),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"class-not-found": {
|
||||
pvc: makePVCClass(&classNotHere),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"no-mode-class": {
|
||||
pvc: makePVCClass(&classNoMode),
|
||||
shouldDelay: false,
|
||||
shouldFail: true,
|
||||
},
|
||||
"immediate-mode-class": {
|
||||
pvc: makePVCClass(&classImmediateMode),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"wait-mode-class": {
|
||||
pvc: makePVCClass(&classWaitMode),
|
||||
shouldDelay: true,
|
||||
},
|
||||
}
|
||||
|
||||
classes := []*storagev1.StorageClass{
|
||||
makeStorageClass(classNoMode, nil),
|
||||
makeStorageClass(classImmediateMode, &modeImmediate),
|
||||
makeStorageClass(classWaitMode, &modeWait),
|
||||
}
|
||||
|
||||
client := &fake.Clientset{}
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
classInformer := informerFactory.Storage().V1().StorageClasses()
|
||||
|
||||
for _, class := range classes {
|
||||
if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
|
||||
t.Fatalf("Failed to add storage class %q: %v", class.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
shouldDelay, err := IsDelayBindingMode(test.pvc, classInformer.Lister())
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
if err == nil && test.shouldFail {
|
||||
t.Errorf("Test %q returned success, expected error", name)
|
||||
}
|
||||
if shouldDelay != test.shouldDelay {
|
||||
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindMatchVolumeWithNode(t *testing.T) {
|
||||
volumes := []*v1.PersistentVolume{
|
||||
makeTestVolume("local-small", "local001", "5G", true, nil),
|
||||
makeTestVolume("local-pd-very-large", "local002", "200E", true, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "large"
|
||||
}),
|
||||
makeTestVolume("affinity-pv", "affinity001", "100G", true, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "wait"
|
||||
pv.Spec.NodeAffinity = GetVolumeNodeAffinity("key1", "value1")
|
||||
}),
|
||||
makeTestVolume("affinity-pv2", "affinity002", "150G", true, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "wait"
|
||||
pv.Spec.NodeAffinity = GetVolumeNodeAffinity("key1", "value1")
|
||||
}),
|
||||
makeTestVolume("affinity-prebound", "affinity003", "100G", true, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "wait"
|
||||
pv.Spec.ClaimRef = &v1.ObjectReference{Name: "claim02", Namespace: "myns"}
|
||||
pv.Spec.NodeAffinity = GetVolumeNodeAffinity("key1", "value1")
|
||||
}),
|
||||
makeTestVolume("affinity-pv3", "affinity003", "200G", true, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "wait"
|
||||
pv.Spec.NodeAffinity = GetVolumeNodeAffinity("key1", "value3")
|
||||
}),
|
||||
makeTestVolume("affinity-pv4", "affinity004", "200G", false, func(pv *v1.PersistentVolume) {
|
||||
pv.Spec.StorageClassName = "wait"
|
||||
pv.Spec.NodeAffinity = GetVolumeNodeAffinity("key1", "value4")
|
||||
}),
|
||||
}
|
||||
|
||||
node1 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value1"},
|
||||
},
|
||||
}
|
||||
node2 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value2"},
|
||||
},
|
||||
}
|
||||
node3 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value3"},
|
||||
},
|
||||
}
|
||||
node4 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value4"},
|
||||
},
|
||||
}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
expectedMatch string
|
||||
claim *v1.PersistentVolumeClaim
|
||||
node *v1.Node
|
||||
excludedVolumes map[string]*v1.PersistentVolume
|
||||
}{
|
||||
"success-match": {
|
||||
expectedMatch: "affinity-pv",
|
||||
claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node1,
|
||||
},
|
||||
"success-prebound": {
|
||||
expectedMatch: "affinity-prebound",
|
||||
claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node1,
|
||||
},
|
||||
"success-exclusion": {
|
||||
expectedMatch: "affinity-pv2",
|
||||
claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node1,
|
||||
excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil},
|
||||
},
|
||||
"fail-exclusion": {
|
||||
expectedMatch: "",
|
||||
claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node1,
|
||||
excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil, "affinity002": nil},
|
||||
},
|
||||
"fail-accessmode": {
|
||||
expectedMatch: "",
|
||||
claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}),
|
||||
node: node1,
|
||||
},
|
||||
"fail-nodeaffinity": {
|
||||
expectedMatch: "",
|
||||
claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node2,
|
||||
},
|
||||
"fail-prebound-node-affinity": {
|
||||
expectedMatch: "",
|
||||
claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node3,
|
||||
},
|
||||
"fail-nonavaliable": {
|
||||
expectedMatch: "",
|
||||
claim: makeTestPersistentVolumeClaim("claim04", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node4,
|
||||
},
|
||||
"success-bad-and-good-node-affinity": {
|
||||
expectedMatch: "affinity-pv3",
|
||||
claim: makeTestPersistentVolumeClaim("claim03", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
|
||||
node: node3,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
volume, err := FindMatchingVolume(scenario.claim, volumes, scenario.node, scenario.excludedVolumes, true)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error matching volume by claim: %v", err)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume == nil {
|
||||
t.Errorf("Expected match but received nil volume for scenario: %s", name)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch {
|
||||
t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name)
|
||||
}
|
||||
if len(scenario.expectedMatch) == 0 && volume != nil {
|
||||
t.Errorf("Unexpected match for scenario: %s, matched with %s instead", name, volume.UID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestPersistentVolumeClaim(name string, size string, accessMode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
sc := "wait"
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: accessMode,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
|
||||
},
|
||||
},
|
||||
StorageClassName: &sc,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestVolume(uid types.UID, name string, capacity string, available bool, modfn func(*v1.PersistentVolume)) *v1.PersistentVolume {
|
||||
var status v1.PersistentVolumeStatus
|
||||
if available {
|
||||
status = v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
}
|
||||
}
|
||||
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
|
||||
pv := v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uid,
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: status,
|
||||
}
|
||||
|
||||
if modfn != nil {
|
||||
modfn(&pv)
|
||||
}
|
||||
return &pv
|
||||
}
|
@@ -40,8 +40,8 @@ import (
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/events"
+pvutil "k8s.io/component-helpers/storage/volume"
 "k8s.io/klog/v2"
-pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 "k8s.io/kubernetes/pkg/scheduler"
 kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 "k8s.io/kubernetes/pkg/scheduler/profile"