commit 6de257e93f

    Merge remote-tracking branch 'origin/master'
@@ -77,7 +77,7 @@ dependencies:
     match: const etcdImage

 - name: "etcd-image"
-  version: 3.5.0
+  version: 3.5.1
   refPaths:
   - path: cluster/images/etcd/Makefile
     match: BUNDLED_ETCD_VERSIONS\?|LATEST_ETCD_VERSION\?
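The `match` patterns above are what tie this dependencies entry to the files listed under `refPaths`: a consistency check only needs to find, in each referenced file, a line that matches the regex and still carries the pinned version. A minimal sketch of that idea (hypothetical `fileMentionsVersion` helper, not Kubernetes' actual verification script):

```go
package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

// fileMentionsVersion reports whether some line of the file matches the
// entry's regex and still contains the pinned version string.
func fileMentionsVersion(path, match, version string) (bool, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	re := regexp.MustCompile(match)
	for _, line := range strings.Split(string(data), "\n") {
		if re.MatchString(line) && strings.Contains(line, version) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := fileMentionsVersion("cluster/images/etcd/Makefile",
		`BUNDLED_ETCD_VERSIONS\?|LATEST_ETCD_VERSION\?`, "3.5.1")
	fmt.Println(ok, err)
}
```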
@@ -15,7 +15,7 @@
 # Build the etcd image
 #
 # Usage:
-# [BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
+# [BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
 #
 # The image contains different etcd versions to simplify
 # upgrades. Thus be careful when removing any versions from here.
@@ -26,15 +26,15 @@
 # Except from etcd-$(version) and etcdctl-$(version) binaries, we also
 # need etcd and etcdctl binaries for backward compatibility reasons.
 # That binary will be set to the last version from $(BUNDLED_ETCD_VERSIONS).
-BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0
+BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1

 # LATEST_ETCD_VERSION identifies the most recent etcd version available.
-LATEST_ETCD_VERSION?=3.5.0
+LATEST_ETCD_VERSION?=3.5.1

 # REVISION provides a version number for this image and all it's bundled
 # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment
 # for each revision of this image at that etcd version.
-REVISION?=4
+REVISION?=0

 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "<etcd-version>-<revision>".
 IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION)
@@ -28,7 +28,7 @@ import (
 )

 var (
-    supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.0"}
+    supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.1"}
 )

 const (
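The hunk above keeps the migrate tooling's allow-list in step with the Makefile: 3.5.0 is replaced by 3.5.1 rather than appended, so 3.5.0 is no longer an accepted target. A minimal sketch of a guard of this shape (hypothetical helper, not the migrator's real code):

```go
package main

import "fmt"

var supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.1"}

// validateTargetVersion rejects any version that is not on the supported list.
func validateTargetVersion(v string) error {
	for _, s := range supportedEtcdVersions {
		if s == v {
			return nil
		}
	}
	return fmt.Errorf("unsupported etcd version %q", v)
}

func main() {
	fmt.Println(validateTargetVersion("3.5.1")) // <nil>
	fmt.Println(validateTargetVersion("3.5.0")) // error after this bump
}
```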
@@ -552,7 +552,6 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
        controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
        controllerContext.InformerFactory.Core().V1().Pods(),
        controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
-       utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
    )
    if err != nil {
        return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
@@ -565,7 +564,6 @@ func startPVProtectionController(ctx context.Context, controllerContext Controll
    go pvprotection.NewPVProtectionController(
        controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
        controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
-       utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
    ).Run(ctx, 1)
    return nil, true, nil
 }
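With StorageObjectInUseProtection treated as unconditionally enabled, both protection controllers drop the boolean parameter and every caller shrinks by one argument, as the two hunks above show. A sketch of the updated wiring (the package paths are assumed from the hunks; the function itself is illustrative only):

```go
package main

import (
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller/volume/pvcprotection"
)

// buildPVCProtection shows the post-change constructor call: the trailing
// feature-gate bool is gone, everything else is unchanged.
func buildPVCProtection(factory informers.SharedInformerFactory, client clientset.Interface) error {
	_, err := pvcprotection.NewPVCProtectionController(
		factory.Core().V1().PersistentVolumeClaims(),
		factory.Core().V1().Pods(),
		client,
	)
	return err
}
```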
@@ -23,12 +23,9 @@ import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-   utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/kubernetes/scheme"
    ref "k8s.io/client-go/tools/reference"
-   featuregatetesting "k8s.io/component-base/featuregate/testing"
    pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-   "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -1229,26 +1226,21 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
        isExpectedMatch bool
        vol             *v1.PersistentVolume
        pvc             *v1.PersistentVolumeClaim
-       enableStorageObjectInUseProtection bool
    }{
        "pv deletionTimeStamp not set": {
            isExpectedMatch: true,
            vol:             pv,
            pvc:             pvc,
-           enableStorageObjectInUseProtection: true,
        },
        "pv deletionTimeStamp set": {
            isExpectedMatch: false,
            vol:             pvToDelete,
            pvc:             pvc,
-           enableStorageObjectInUseProtection: true,
        },
    }

    for name, testCase := range satisfyingTestCases {
        t.Run(name, func(t *testing.T) {
-           defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
-
            err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
            // expected to match but got an error
            if err != nil && testCase.isExpectedMatch {
@@ -1265,25 +1257,20 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
        isExpectedMatch bool
        vol             persistentVolumeOrderedIndex
        pvc             *v1.PersistentVolumeClaim
-       enableStorageObjectInUseProtection bool
    }{
        "pv deletionTimeStamp not set": {
            isExpectedMatch: true,
            vol:             createTestVolOrderedIndex(pv),
            pvc:             pvc,
-           enableStorageObjectInUseProtection: true,
        },
        "pv deletionTimeStamp set": {
            isExpectedMatch: false,
            vol:             createTestVolOrderedIndex(pvToDelete),
            pvc:             pvc,
-           enableStorageObjectInUseProtection: true,
        },
    }
    for name, testCase := range filteringTestCases {
        t.Run(name, func(t *testing.T) {
-           defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
-
            pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
            // expected to match but either got an error or no returned pvmatch
            if pvmatch == nil && testCase.isExpectedMatch {
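The deleted `defer featuregatetesting.SetFeatureGateDuringTest(...)()` lines are the standard per-test gate override; once a gate is hard-wired on, toggling it in tests is meaningless. For reference, the pattern as it still applies to non-GA gates in this era of the codebase (sketch; `features.SomeAlphaFeature` is a placeholder, not a real gate):

```go
import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithGate(t *testing.T) {
	// Flip the gate for the duration of this test; the returned func
	// restores the previous value and is run via defer.
	defer featuregatetesting.SetFeatureGateDuringTest(
		t, utilfeature.DefaultFeatureGate, features.SomeAlphaFeature, true)()

	// ... test body runs with the gate enabled ...
}
```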
@@ -29,7 +29,6 @@ import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
-   utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    corelisters "k8s.io/client-go/listers/core/v1"
@@ -45,7 +44,6 @@ import (
    "k8s.io/kubernetes/pkg/controller/volume/events"
    "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
    pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-   "k8s.io/kubernetes/pkg/features"
    proxyutil "k8s.io/kubernetes/pkg/proxy/util"
    "k8s.io/kubernetes/pkg/util/goroutinemap"
    "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@@ -275,11 +273,9 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
    requestedSize := requestedQty.Value()

    // check if PV's DeletionTimeStamp is set, if so, return error.
-   if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
    if volume.ObjectMeta.DeletionTimestamp != nil {
        return fmt.Errorf("the volume is marked for deletion %q", volume.Name)
    }
-   }

    volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
    volumeSize := volumeQty.Value()
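After the unwrapping above, the deletion-timestamp check runs unconditionally in both `checkVolumeSatisfyClaim` (reject) and `FindMatchingVolume` (skip, next hunk). The core test, distilled into a standalone sketch (hypothetical helper):

```go
import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// volumeUsable distills the now-unconditional check: a set
// DeletionTimestamp means the PV is being torn down, so binding logic
// must reject it (checkVolumeSatisfyClaim) or skip it (FindMatchingVolume).
func volumeUsable(volume *v1.PersistentVolume) error {
	if volume.ObjectMeta.DeletionTimestamp != nil {
		return fmt.Errorf("the volume is marked for deletion %q", volume.Name)
	}
	return nil
}
```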
@@ -25,12 +25,10 @@ import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
-   utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/kubernetes/scheme"
    storagelisters "k8s.io/client-go/listers/storage/v1"
    "k8s.io/client-go/tools/reference"
    storagehelpers "k8s.io/component-helpers/storage/volume"
-   "k8s.io/kubernetes/pkg/features"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -227,11 +225,9 @@ func FindMatchingVolume(
        }

        // check if PV's DeletionTimeStamp is set, if so, skip this volume.
-       if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
        if volume.ObjectMeta.DeletionTimestamp != nil {
            continue
        }
-       }

        nodeAffinityValid := true
        if node != nil {
@@ -53,17 +53,13 @@ type Controller struct {
    podIndexer cache.Indexer

    queue workqueue.RateLimitingInterface
-
-   // allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
-   storageObjectInUseProtectionEnabled bool
 }

 // NewPVCProtectionController returns a new instance of PVCProtectionController.
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) (*Controller, error) {
+func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
    e := &Controller{
        client: cl,
        queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
-       storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
    }
    if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
        ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@@ -189,10 +185,6 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin
 }

 func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeClaim) error {
-   // Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
-   if !c.storageObjectInUseProtectionEnabled {
-       return nil
-   }
    claimClone := pvc.DeepCopy()
    claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
    _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
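`addFinalizer` now always attaches `volumeutil.PVCProtectionFinalizer` (the `kubernetes.io/pvc-protection` finalizer), blocking claim deletion until no pod uses it. A sketch of the idempotent add-if-absent pattern such controllers depend on (hypothetical helper; the real controller guards the call from `processPVC`):

```go
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// addFinalizerIfMissing appends fin unless already present and reports
// whether an Update call is needed; Kubernetes finalizers are plain
// strings on ObjectMeta, so the helper works for PVCs and PVs alike.
func addFinalizerIfMissing(meta *metav1.ObjectMeta, fin string) bool {
	for _, f := range meta.Finalizers {
		if f == fin {
			return false
		}
	}
	meta.Finalizers = append(meta.Finalizers, fin)
	return true
}
```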
@@ -186,30 +186,21 @@ func TestPVCProtectionController(t *testing.T) {
        // List of expected kubeclient actions that should happen during the
        // test.
        expectedActions []clienttesting.Action
-       storageObjectInUseProtectionEnabled bool
    }{
        //
        // PVC events
        //
        {
-           name:       "StorageObjectInUseProtection Enabled, PVC without finalizer -> finalizer is added",
+           name:       "PVC without finalizer -> finalizer is added",
            updatedPVC: pvc(),
            expectedActions: []clienttesting.Action{
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
-       },
-       {
-           name:            "StorageObjectInUseProtection Disabled, PVC without finalizer -> finalizer is not added",
-           updatedPVC:      pvc(),
-           expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: false,
        },
        {
            name:            "PVC with finalizer -> no action",
            updatedPVC:      withProtectionFinalizer(pvc()),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "saving PVC finalizer fails -> controller retries",
@@ -229,25 +220,14 @@ func TestPVCProtectionController(t *testing.T) {
                // This succeeds
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
-           name:       "StorageObjectInUseProtection Enabled, deleted PVC with finalizer -> finalizer is removed",
+           name:       "deleted PVC with finalizer -> finalizer is removed",
            updatedPVC: deleted(withProtectionFinalizer(pvc())),
            expectedActions: []clienttesting.Action{
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
-       },
-       {
-           name:       "StorageObjectInUseProtection Disabled, deleted PVC with finalizer -> finalizer is removed",
-           updatedPVC: deleted(withProtectionFinalizer(pvc())),
-           expectedActions: []clienttesting.Action{
-               clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
-               clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
-           },
-           storageObjectInUseProtectionEnabled: false,
        },
        {
            name: "finalizer removal fails -> controller retries",
@@ -270,7 +250,6 @@ func TestPVCProtectionController(t *testing.T) {
                // Succeeds
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "deleted PVC with finalizer + pod with the PVC exists -> finalizer is not removed",
@@ -290,7 +269,6 @@ func TestPVCProtectionController(t *testing.T) {
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "deleted PVC with finalizer + pod with the PVC finished but is not deleted -> finalizer is not removed",
@@ -299,7 +277,6 @@ func TestPVCProtectionController(t *testing.T) {
            },
            updatedPVC:      deleted(withProtectionFinalizer(pvc())),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "deleted PVC with finalizer + pod with the PVC exists but is not in the Informer's cache yet -> finalizer is not removed",
@@ -311,7 +288,6 @@ func TestPVCProtectionController(t *testing.T) {
            expectedActions: []clienttesting.Action{
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        //
        // Pod events
@@ -323,7 +299,6 @@ func TestPVCProtectionController(t *testing.T) {
            },
            updatedPod:      withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "updated finished Pod -> finalizer is not removed",
@@ -332,7 +307,6 @@ func TestPVCProtectionController(t *testing.T) {
            },
            updatedPod:      withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "updated unscheduled Pod -> finalizer is removed",
@@ -344,7 +318,6 @@ func TestPVCProtectionController(t *testing.T) {
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "deleted running Pod -> finalizer is removed",
@@ -356,7 +329,6 @@ func TestPVCProtectionController(t *testing.T) {
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "pod delete and create with same namespaced name seen as an update, old pod used deleted PVC -> finalizer is removed",
@@ -369,7 +341,6 @@ func TestPVCProtectionController(t *testing.T) {
                clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
                clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "pod delete and create with same namespaced name seen as an update, old pod used non-deleted PVC -> finalizer is not removed",
@@ -379,7 +350,6 @@ func TestPVCProtectionController(t *testing.T) {
            deletedPod:      withPVC(defaultPVCName, pod()),
            updatedPod:      withUID("uid2", pod()),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "pod delete and create with same namespaced name seen as an update, both pods reference deleted PVC -> finalizer is not removed",
@@ -389,7 +359,6 @@ func TestPVCProtectionController(t *testing.T) {
            deletedPod:      withPVC(defaultPVCName, pod()),
            updatedPod:      withUID("uid2", withPVC(defaultPVCName, pod())),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "pod update from unscheduled to scheduled, deleted PVC is referenced -> finalizer is not removed",
@@ -399,7 +368,6 @@ func TestPVCProtectionController(t *testing.T) {
            deletedPod:      unscheduled(withPVC(defaultPVCName, pod())),
            updatedPod:      withPVC(defaultPVCName, pod()),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
    }

@@ -431,7 +399,7 @@ func TestPVCProtectionController(t *testing.T) {
        podInformer := informers.Core().V1().Pods()

        // Create the controller
-       ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)
+       ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
@@ -21,7 +21,7 @@ import (
    "fmt"
    "time"

-   "k8s.io/api/core/v1"
+   v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -47,17 +47,13 @@ type Controller struct {
    pvListerSynced cache.InformerSynced

    queue workqueue.RateLimitingInterface
-
-   // allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
-   storageObjectInUseProtectionEnabled bool
 }

 // NewPVProtectionController returns a new *Controller.
-func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
+func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
    e := &Controller{
        client: cl,
        queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
-       storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
    }
    if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
        ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolume_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@@ -158,10 +154,6 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error {
 }

 func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
-   // Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
-   if !c.storageObjectInUseProtectionEnabled {
-       return nil
-   }
    pvClone := pv.DeepCopy()
    pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
    _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
@@ -25,7 +25,7 @@ import (

    "github.com/davecgh/go-spew/spew"

-   "k8s.io/api/core/v1"
+   v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -113,29 +113,20 @@ func TestPVProtectionController(t *testing.T) {
        // List of expected kubeclient actions that should happen during the
        // test.
        expectedActions []clienttesting.Action
-       storageObjectInUseProtectionEnabled bool
    }{
        // PV events
        //
        {
-           name:      "StorageObjectInUseProtection Enabled, PV without finalizer -> finalizer is added",
+           name:      "PV without finalizer -> finalizer is added",
            updatedPV: pv(),
            expectedActions: []clienttesting.Action{
                clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
            },
-           storageObjectInUseProtectionEnabled: true,
-       },
-       {
-           name:            "StorageObjectInUseProtection Disabled, PV without finalizer -> finalizer is added",
-           updatedPV:       pv(),
-           expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: false,
        },
        {
            name:            "PVC with finalizer -> no action",
            updatedPV:       withProtectionFinalizer(pv()),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "saving PVC finalizer fails -> controller retries",
@@ -155,23 +146,13 @@ func TestPVProtectionController(t *testing.T) {
                // This succeeds
                clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
-           name:      "StorageObjectInUseProtection Enabled, deleted PV with finalizer -> finalizer is removed",
+           name:      "deleted PV with finalizer -> finalizer is removed",
            updatedPV: deleted(withProtectionFinalizer(pv())),
            expectedActions: []clienttesting.Action{
                clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
            },
-           storageObjectInUseProtectionEnabled: true,
-       },
-       {
-           name:      "StorageObjectInUseProtection Disabled, deleted PV with finalizer -> finalizer is removed",
-           updatedPV: deleted(withProtectionFinalizer(pv())),
-           expectedActions: []clienttesting.Action{
-               clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
-           },
-           storageObjectInUseProtectionEnabled: false,
        },
        {
            name: "finalizer removal fails -> controller retries",
@@ -191,13 +172,11 @@ func TestPVProtectionController(t *testing.T) {
                // Succeeds
                clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
            },
-           storageObjectInUseProtectionEnabled: true,
        },
        {
            name:            "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
            updatedPV:       deleted(withProtectionFinalizer(boundPV())),
            expectedActions: []clienttesting.Action{},
-           storageObjectInUseProtectionEnabled: true,
        },
    }

@@ -231,7 +210,7 @@ func TestPVProtectionController(t *testing.T) {
        }

        // Create the controller
-       ctrl := NewPVProtectionController(pvInformer, client, test.storageObjectInUseProtectionEnabled)
+       ctrl := NewPVProtectionController(pvInformer, client)

        // Start the test by simulating an event
        if test.updatedPV != nil {
@@ -570,7 +570,6 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
        return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
    }

-   if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
    // Pods that uses a PVC that is being deleted must not be started.
    //
    // In case an old kubelet is running without this check or some kubelets
@@ -582,7 +581,6 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
    if pvc.ObjectMeta.DeletionTimestamp != nil {
        return nil, errors.New("PVC is being deleted")
    }
-   }

    if pvc.Status.Phase != v1.ClaimBound {
        return nil, errors.New("PVC is not bound")
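The remaining hunks migrate the scheduler's volume binder from klog format strings to structured logging: a constant message plus key/value pairs, with `klog.KObj` and `klog.KRef` emitting stable `namespace/name` references. A minimal before-and-after sketch (self-contained function; the surrounding names are assumed):

```go
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

func logExamples(pod *v1.Pod, node *v1.Node, err error) {
	// Old style: positional format string, names formatted by the caller.
	klog.V(5).Infof("FindPodVolumes for pod %q, node %q", pod.Name, node.Name)

	// New style: constant message plus key/value pairs; klog.KObj lazily
	// renders "namespace/name" from any object with ObjectMeta, and
	// klog.KRef builds the same reference from bare strings.
	klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))

	// Errors become a first-class argument instead of a %v verb.
	klog.ErrorS(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
}
```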
@@ -261,10 +261,9 @@ func NewVolumeBinder(
 // returned.
 func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
    podVolumes = &PodVolumes{}
-   podName := getPodName(pod)

    // Warning: Below log needs high verbosity as it can be printed several times (#60933).
-   klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
+   klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))

    // Initialize to true for pods that don't have volumes. These
    // booleans get translated into reason strings when the function
@@ -316,7 +315,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*

    // Check PV node affinity on bound volumes
    if len(boundClaims) > 0 {
-       boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, podName)
+       boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
        if err != nil {
            return
        }
@@ -372,9 +371,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 // 2. Update the pvcCache with the new PVCs with annotations set
 // 3. Update PodVolumes again with cached API updates for PVs and PVCs.
 func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) {
-   podName := getPodName(assumedPod)
-
-   klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
+   klog.V(4).InfoS("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
    defer func() {
        if err != nil {
            metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
@@ -382,7 +379,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod
    }()

    if allBound := b.arePodVolumesBound(assumedPod); allBound {
-       klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
+       klog.V(4).InfoS("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
        return true, nil
    }

@@ -390,14 +387,15 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod
    newBindings := []*BindingInfo{}
    for _, binding := range podVolumes.StaticBindings {
        newPV, dirty, err := pvutil.GetBindVolumeToClaim(binding.pv, binding.pvc)
-       klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
-           podName,
-           binding.pv.Name,
-           binding.pvc.Name,
-           newPV,
-           dirty,
-           err)
+       klog.V(5).InfoS("AssumePodVolumes: GetBindVolumeToClaim",
+           "pod", klog.KObj(assumedPod),
+           "PV", klog.KObj(binding.pv),
+           "PVC", klog.KObj(binding.pvc),
+           "newPV", klog.KObj(newPV),
+           "dirty", dirty,
+       )
        if err != nil {
+           klog.ErrorS(err, "AssumePodVolumes: fail to GetBindVolumeToClaim")
            b.revertAssumedPVs(newBindings)
            return false, err
        }
@@ -444,8 +442,7 @@ func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
 // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
 // by the PV controller.
 func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) {
-   podName := getPodName(assumedPod)
-   klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
+   klog.V(4).InfoS("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName))

    defer func() {
        if err != nil {
@@ -457,7 +454,7 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes
    claimsToProvision := podVolumes.DynamicProvisions

    // Start API operations
-   err = b.bindAPIUpdate(podName, bindings, claimsToProvision)
+   err = b.bindAPIUpdate(assumedPod, bindings, claimsToProvision)
    if err != nil {
        return err
    }
@@ -481,7 +478,8 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
 }

 // bindAPIUpdate makes the API update for those PVs/PVCs.
-func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
+func (b *volumeBinder) bindAPIUpdate(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
+   podName := getPodName(pod)
    if bindings == nil {
        return fmt.Errorf("failed to get cached bindings for pod %q", podName)
    }
@@ -511,16 +509,15 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl
    // Do the actual prebinding. Let the PV controller take care of the rest
    // There is no API rollback if the actual binding fails
    for _, binding = range bindings {
-       klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
+       klog.V(5).InfoS("bindAPIUpdate: binding PV to PVC", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
        // TODO: does it hurt if we make an api call and nothing needs to be updated?
-       claimKey := getPVCName(binding.pvc)
-       klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
+       klog.V(2).InfoS("Claim bound to volume", "PVC", klog.KObj(binding.pvc), "PV", klog.KObj(binding.pv))
        newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
        if err != nil {
-           klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
+           klog.V(4).InfoS("Updating PersistentVolume: binding to claim failed", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err)
            return err
        }
-       klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
+       klog.V(4).InfoS("Updating PersistentVolume: bound to claim", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
        // Save updated object from apiserver for later checking.
        binding.pv = newPV
        lastProcessedBinding++
@@ -529,7 +526,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl
    // Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
    // PV controller is expected to signal back by removing related annotations if actual provisioning fails
    for i, claim = range claimsToProvision {
-       klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
+       klog.V(5).InfoS("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim))
        newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
        if err != nil {
            return err
@@ -573,7 +570,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
    csiNode, err := b.csiNodeLister.Get(node.Name)
    if err != nil {
        // TODO: return the error once CSINode is created by default
-       klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err)
+       klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
    }

    // Check for any conditions that might require scheduling retry
@@ -585,7 +582,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
        if apierrors.IsNotFound(err) {
            return false, fmt.Errorf("pod does not exist any more: %w", err)
        }
-       klog.Errorf("failed to get pod %s/%s from the lister: %v", pod.Namespace, pod.Name, err)
+       klog.ErrorS(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
    }

    for _, binding := range bindings {
@@ -681,7 +678,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
    }

    // All pvs and pvcs that we operated on are bound
-   klog.V(4).Infof("All PVCs for pod %q are bound", podName)
+   klog.V(4).InfoS("All PVCs for pod are bound", "pod", klog.KObj(pod))
    return true, nil
 }

@@ -725,12 +722,12 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste

    fullyBound := b.isPVCFullyBound(pvc)
    if fullyBound {
-       klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
+       klog.V(5).InfoS("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
    } else {
        if pvc.Spec.VolumeName != "" {
-           klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
+           klog.V(5).InfoS("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
        } else {
-           klog.V(5).Infof("PVC %q is not bound", pvcKey)
+           klog.V(5).InfoS("PVC is not bound", "PVC", klog.KObj(pvc))
        }
    }
    return fullyBound, pvc, nil
@@ -787,11 +784,11 @@ func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
    return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
 }

-func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, bool, error) {
+func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
    csiNode, err := b.csiNodeLister.Get(node.Name)
    if err != nil {
        // TODO: return the error once CSINode is created by default
-       klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err)
+       klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
    }

    for _, pvc := range claims {
@@ -811,20 +808,19 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node

        err = volumeutil.CheckNodeAffinity(pv, node.Labels)
        if err != nil {
-           klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
+           klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
            return false, true, nil
        }
-       klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
+       klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod))
    }

-   klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
+   klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node))
    return true, true, nil
 }

 // findMatchingVolumes tries to find matching volumes for given claims,
 // and return unbound claims for further provision.
 func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
-   podName := getPodName(pod)
    // Sort all the claims by increasing size request to get the smallest fits
    sort.Sort(byPVCSize(claimsToBind))

@@ -836,7 +832,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
        // Get storage class name from each PVC
        storageClassName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
        allPVs := b.pvCache.ListPVs(storageClassName)
-       pvcName := getPVCName(pvc)

        // Find a matching PV
        pv, err := pvutil.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
@@ -844,7 +839,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
            return false, nil, nil, err
        }
        if pv == nil {
-           klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
+           klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node))
            unboundClaims = append(unboundClaims, pvc)
            foundMatches = false
            continue
@@ -853,11 +848,11 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
        // matching PV needs to be excluded so we don't select it again
        chosenPVs[pv.Name] = pv
        bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc})
-       klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName)
+       klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod))
    }

    if foundMatches {
-       klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
+       klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
    }

    return
@@ -867,7 +862,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
 // findMatchingVolumes, and do not have matching volumes for binding), and return true
 // if all of the claims are eligible for dynamic provision.
 func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) {
-   podName := getPodName(pod)
    dynamicProvisions = []*v1.PersistentVolumeClaim{}

    // We return early with provisionedClaims == nil if a check
@@ -885,13 +879,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
        }
        provisioner := class.Provisioner
        if provisioner == "" || provisioner == pvutil.NotSupportedProvisioner {
-           klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
+           klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim))
            return false, true, nil, nil
        }

        // Check if the node can satisfy the topology requirement in the class
        if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
-           klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
+           klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim))
            return false, true, nil, nil
        }

@@ -908,7 +902,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
        dynamicProvisions = append(dynamicProvisions, claim)

    }
-   klog.V(4).Infof("Provisioning for %d claims of pod %q that has no matching volumes on node %q ...", len(claimsToProvision), podName, node.Name)
+   klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))

    return true, true, dynamicProvisions, nil
 }
@@ -974,8 +968,8 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten

    // TODO (?): this doesn't give any information about which pools where considered and why
    // they had to be rejected. Log that above? But that might be a lot of log output...
-   klog.V(4).Infof("Node %q has no accessible CSIStorageCapacity with enough capacity for PVC %s/%s of size %d and storage class %q",
-       node.Name, claim.Namespace, claim.Name, sizeInBytes, storageClass.Name)
+   klog.V(4).InfoS("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
+       "node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
    return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -997,7 +991,7 @@ func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1beta1.CSI
|
|||||||
selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology)
|
selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// This should never happen because NodeTopology must be valid.
|
// This should never happen because NodeTopology must be valid.
|
||||||
klog.Errorf("unexpected error converting %+v to a label selector: %v", capacity.NodeTopology, err)
|
klog.ErrorS(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return selector.Matches(labels.Set(node.Labels))
|
return selector.Matches(labels.Set(node.Labels))
|
||||||
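
The hunks above are part of the structured-logging migration: printf-style `klog.Infof`/`klog.Errorf` calls become `klog.InfoS`/`klog.ErrorS` with a constant message plus key-value pairs, and object identities are passed through `klog.KObj` instead of being formatted into the string. A minimal runnable sketch of the before/after pattern (the `pod` value here is a stand-in, not taken from the commit):

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}

	// Before: values are fused into a printf format string.
	klog.V(4).Infof("No matching volumes for Pod %q on node %q", pod.Name, "node1")

	// After: constant message plus key-value pairs; klog.KObj renders a
	// namespace/name reference for anything with GetName/GetNamespace.
	klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "node", "node1")

	// Errors move out of the format string into ErrorS's first argument.
	klog.ErrorS(nil, "Unexpected error converting to a label selector", "nodeTopology", "example")

	klog.Flush()
}
```

Dropping the `podName`/`pvcName` locals falls out naturally: once every call site logs `klog.KObj(pod)` directly, the pre-formatted name strings have no remaining users.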

@ -19,6 +19,7 @@ package volumebinding
 import (
 	"context"
 	"fmt"
+	"os"
 	"reflect"
 	"sort"
 	"testing"
@ -187,7 +188,8 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}, csiStorageCapacity ...b
 	informerFactory.Start(stopCh)
 	for v, synced := range informerFactory.WaitForCacheSync(stopCh) {
 		if !synced {
-			klog.Fatalf("Error syncing informer for %v", v)
+			klog.ErrorS(nil, "Error syncing informer", "informer", v)
+			os.Exit(1)
 		}
 	}

@ -1528,7 +1530,7 @@ func TestBindAPIUpdate(t *testing.T) {
 		testEnv.assumeVolumes(t, "node1", pod, scenario.bindings, scenario.provisionedPVCs)

 		// Execute
-		err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
+		err := testEnv.internalBinder.bindAPIUpdate(pod, scenario.bindings, scenario.provisionedPVCs)

 		// Validate
 		if !scenario.shouldFail && err != nil {
@ -2069,7 +2071,7 @@ func TestBindPodVolumes(t *testing.T) {
 		go func(scenario scenarioType) {
 			time.Sleep(5 * time.Second)
 			// Sleep a while to run after bindAPIUpdate in BindPodVolumes
-			klog.V(5).Infof("Running delay function")
+			klog.V(5).InfoS("Running delay function")
 			scenario.delayFunc(t, testEnv, pod, scenario.initPVs, scenario.initPVCs)
 		}(scenario)
 	}
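
Two related adjustments ride along in the test file above: `klog.Fatalf` has no structured counterpart, so it becomes `klog.ErrorS(nil, ...)` followed by an explicit `os.Exit(1)` (hence the new `"os"` import), and `bindAPIUpdate` now takes the `*v1.Pod` itself rather than `pod.Name`, presumably so its internals can log the object with `klog.KObj`. A runnable sketch of the fatal-error replacement (the `synced` flag is a stand-in):

```go
package main

import (
	"os"

	"k8s.io/klog/v2"
)

func main() {
	synced := false // stand-in for an informer cache-sync result

	if !synced {
		// ErrorS accepts a nil error when there is no error object; the
		// explicit exit preserves Fatalf's terminate-on-failure behavior.
		klog.ErrorS(nil, "Error syncing informer", "informer", "pods")
		klog.Flush()
		os.Exit(1)
	}
}
```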

@ -21,11 +21,8 @@ import (
 	"io"

 	"k8s.io/apiserver/pkg/admission"
-	"k8s.io/apiserver/pkg/admission/initializer"
-	"k8s.io/component-base/featuregate"
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@ -45,12 +42,9 @@ func Register(plugins *admission.Plugins) {
 // storageProtectionPlugin holds state for and implements the admission plugin.
 type storageProtectionPlugin struct {
 	*admission.Handler
-
-	storageObjectInUseProtection bool
 }

 var _ admission.Interface = &storageProtectionPlugin{}
-var _ initializer.WantsFeatures = &storageProtectionPlugin{}

 // newPlugin creates a new admission plugin.
 func newPlugin() *storageProtectionPlugin {
@ -70,10 +64,6 @@ var (
 // This prevents users from deleting a PVC that's used by a running pod.
 // This also prevents admin from deleting a PV that's bound by a PVC
 func (c *storageProtectionPlugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
-	if !c.storageObjectInUseProtection {
-		return nil
-	}
-
 	switch a.GetResource().GroupResource() {
 	case pvResource:
 		return c.admitPV(a)
@ -129,11 +119,3 @@ func (c *storageProtectionPlugin) admitPVC(a admission.Attributes) error {
 	pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer)
 	return nil
 }
-
-func (c *storageProtectionPlugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
-	c.storageObjectInUseProtection = featureGates.Enabled(features.StorageObjectInUseProtection)
-}
-
-func (c *storageProtectionPlugin) ValidateInitialization() error {
-	return nil
-}
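
The net effect of these hunks is that the storage-object-in-use-protection admission plugin stops consulting its feature gate entirely: the `StorageObjectInUseProtection` gate long since graduated to GA and is locked on, so the `if !c.storageObjectInUseProtection { return nil }` short-circuit was effectively dead code. With the field gone, the `WantsFeatures`/`InspectFeatureGates`/`ValidateInitialization` initializer plumbing and its imports can be deleted wholesale, and the disabled-feature test cases go with them in the next hunks.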

@ -61,7 +61,6 @@ func TestAdmit(t *testing.T) {
 		resource       schema.GroupVersionResource
 		object         runtime.Object
 		expectedObject runtime.Object
-		featureEnabled bool
 		namespace      string
 	}{
 		{
@ -69,7 +68,6 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
 			claim,
 			claimWithFinalizer,
-			true,
 			claim.Namespace,
 		},
 		{
@ -77,23 +75,13 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
 			claimWithFinalizer,
 			claimWithFinalizer,
-			true,
 			claimWithFinalizer.Namespace,
 		},
-		{
-			"disabled feature -> no finalizer",
-			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
-			claim,
-			claim,
-			false,
-			claim.Namespace,
-		},
 		{
 			"create -> add finalizer",
 			api.SchemeGroupVersion.WithResource("persistentvolumes"),
 			pv,
 			pvWithFinalizer,
-			true,
 			pv.Namespace,
 		},
 		{
@ -101,23 +89,13 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumes"),
 			pvWithFinalizer,
 			pvWithFinalizer,
-			true,
 			pvWithFinalizer.Namespace,
 		},
-		{
-			"disabled feature -> no finalizer",
-			api.SchemeGroupVersion.WithResource("persistentvolumes"),
-			pv,
-			pv,
-			false,
-			pv.Namespace,
-		},
 	}

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			ctrl := newPlugin()
-			ctrl.storageObjectInUseProtection = test.featureEnabled

 			obj := test.object.DeepCopyObject()
 			attrs := admission.NewAttributesRecord(

@ -202,7 +202,6 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 	etcdImage := imageutils.GetE2EImage(imageutils.Etcd)
 	podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
 	replicas := int32(1)
-	zero := int64(0)
 	etcdLocalhostAddress := "127.0.0.1"
 	if framework.TestContext.ClusterIsIPv6() {
 		etcdLocalhostAddress = "::1"
@ -250,31 +249,10 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 			},
 		},
 	}
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   deploymentName,
-			Labels: podLabels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				Spec: v1.PodSpec{
-					TerminationGracePeriodSeconds: &zero,
-					Containers:                    containers,
-					Volumes:                       volumes,
-				},
-			},
-		},
-	}
+	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers = containers
+	d.Spec.Template.Spec.Volumes = volumes
 	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
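
This and the remaining e2e hunks all apply the same refactor: a verbose `appsv1.Deployment` literal is collapsed into a call to the shared `e2edeployment.NewDeployment` fixture, after which only the fields that differ from the fixture's defaults are overridden. Judging from the call sites in this commit (name, replicas, pod labels, container name, image, strategy type; selector and template labels derived from the same label map; a zero termination grace period supplied by the helper, which is why the local `zero` variables disappear), the helper is shaped roughly like the sketch below. This is an approximation inferred from usage, not the framework source verbatim:

```go
package deployment

import (
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NewDeployment builds a minimal Deployment whose selector and template
// labels both come from podLabels, so the selector always matches.
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string,
	imageName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {
	zero := int64(0)
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: deploymentName, Labels: podLabels},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: podLabels},
			Strategy: appsv1.DeploymentStrategy{Type: strategyType},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: podLabels},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: []v1.Container{{Name: imageName, Image: image}},
				},
			},
		},
	}
}
```

Call sites that pass empty `imageName`/`image` strings, as above, replace the generated container list wholesale with `d.Spec.Template.Spec.Containers = containers`.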

@ -266,7 +266,6 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 	// Create the deployment of the webhook
 	podLabels := map[string]string{"app": "sample-crd-conversion-webhook", "crd-webhook": "true"}
 	replicas := int32(1)
-	zero := int64(0)
 	mounts := []v1.VolumeMount{
 		{
 			Name:      "crd-conversion-webhook-certs",
@ -311,31 +310,10 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 			Ports: []v1.ContainerPort{{ContainerPort: containerPort}},
 		},
 	}
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   deploymentCRDName,
-			Labels: podLabels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				Spec: v1.PodSpec{
-					TerminationGracePeriodSeconds: &zero,
-					Containers:                    containers,
-					Volumes:                       volumes,
-				},
-			},
-		},
-	}
+	d := e2edeployment.NewDeployment(deploymentCRDName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers = containers
+	d.Spec.Template.Spec.Volumes = volumes
 	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)

@ -39,6 +39,7 @@ import (
 	"k8s.io/apiserver/pkg/storage/names"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -126,20 +127,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
 }

 func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment {
-	replicas := int32(2)
-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: deploymentName,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{MatchLabels: labels},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: getPodTemplateSpec(labels),
-		},
-	}
+	return e2edeployment.NewDeployment(deploymentName, 2, labels, "nginx", imageutils.GetE2EImage(imageutils.Nginx), appsv1.RollingUpdateDeploymentStrategyType)
 }

 func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {

@ -767,7 +767,6 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
 	// Create the deployment of the webhook
 	podLabels := map[string]string{"app": "sample-webhook", "webhook": "true"}
 	replicas := int32(1)
-	zero := int64(0)
 	mounts := []v1.VolumeMount{
 		{
 			Name:      "webhook-certs",
@ -812,31 +811,10 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
 			Ports: []v1.ContainerPort{{ContainerPort: containerPort}},
 		},
 	}
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   deploymentName,
-			Labels: podLabels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				Spec: v1.PodSpec{
-					TerminationGracePeriodSeconds: &zero,
-					Containers:                    containers,
-					Volumes:                       volumes,
-				},
-			},
-		},
-	}
+	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers = containers
+	d.Spec.Template.Spec.Volumes = volumes
 	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
 	ginkgo.By("Wait for the deployment to be ready")

@ -192,9 +192,6 @@ var _ = SIGDescribe("Deployment", func() {
 		testDeploymentNoReplicas := int32(0)
 		testDeploymentLabels := map[string]string{"test-deployment-static": "true"}
 		testDeploymentLabelsFlat := "test-deployment-static=true"
-		testDeploymentLabelSelectors := metav1.LabelSelector{
-			MatchLabels: testDeploymentLabels,
-		}
 		w := &cache.ListWatch{
 			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				options.LabelSelector = testDeploymentLabelsFlat
@ -205,29 +202,13 @@ var _ = SIGDescribe("Deployment", func() {
 		framework.ExpectNoError(err, "failed to list Deployments")

 		ginkgo.By("creating a Deployment")
-		testDeployment := appsv1.Deployment{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:   testDeploymentName,
-				Labels: map[string]string{"test-deployment-static": "true"},
-			},
-			Spec: appsv1.DeploymentSpec{
-				Replicas: &testDeploymentDefaultReplicas,
-				Selector: &testDeploymentLabelSelectors,
-				Template: v1.PodTemplateSpec{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: testDeploymentLabelSelectors.MatchLabels,
-					},
-					Spec: v1.PodSpec{
-						TerminationGracePeriodSeconds: &one,
-						Containers: []v1.Container{{
-							Name:  testDeploymentName,
-							Image: testDeploymentInitialImage,
-						}},
-					},
-				},
-			},
-		}
-		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), &testDeployment, metav1.CreateOptions{})
+		testDeployment := e2edeployment.NewDeployment(
+			testDeploymentName, testDeploymentDefaultReplicas, testDeploymentLabels,
+			testDeploymentName, testDeploymentInitialImage, appsv1.RollingUpdateDeploymentStrategyType)
+		testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"}
+		testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
+
+		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)

 		ginkgo.By("waiting for Deployment to be created")
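
Note one call-site detail in the hunk above: `NewDeployment` returns a `*appsv1.Deployment`, so the `Create` call now passes `testDeployment` directly where the literal-based code passed `&testDeployment`. The hand-written `testDeploymentLabelSelectors` variable could be dropped for the same reason the other literals shrank: the helper derives the selector from the same pod-label map.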

@ -57,6 +57,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	testutils "k8s.io/kubernetes/test/utils"
@ -1101,35 +1102,14 @@ func generateBacksideHTTPSServiceSpec() *v1.Service {
 }

 func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment {
-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "echoheaders-https",
-		},
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
-				"app": "echoheaders-https",
-			}},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"app": "echoheaders-https",
-					},
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "echoheaders-https",
-							Image: imageutils.GetE2EImage(imageutils.EchoServer),
-							Ports: []v1.ContainerPort{{
+	labels := map[string]string{"app": "echoheaders-https"}
+	d := e2edeployment.NewDeployment("echoheaders-https", 0, labels, "echoheaders-https", imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Replicas = nil
+	d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{
 		ContainerPort: 8443,
 		Name:          "echo-443",
-							}},
-						},
-					},
-				},
-			},
-		},
-	}
+	}}
+	return d
 }

 // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
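
A subtlety in `generateBacksideHTTPSDeploymentSpec`: the original literal never set `Replicas`, leaving the API server to apply its default of 1, while the helper always sets the field from its argument. The `d.Spec.Replicas = nil` line therefore restores the old leave-it-to-the-server behavior rather than pinning the deployment to 0 replicas.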

@ -26,6 +26,7 @@ import (
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	gcm "google.golang.org/api/monitoring/v3"
@ -104,26 +105,10 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta
 		podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue))
 	}

-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"name": name},
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"name": name,
-					},
-				},
-				Spec: podSpec,
-			},
-			Replicas: &replicas,
-		},
-	}
+	d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.ObjectMeta.Namespace = namespace
+	d.Spec.Template.Spec = podSpec
+	return d
 }

 // StackdriverExporterPod is a Pod of simple application that exports a metric of fixed value to
@ -188,26 +173,10 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName
 // one exposing a metric in prometheus format and second a prometheus-to-sd container
 // that scrapes the metric and pushes it to stackdriver.
 func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *appsv1.Deployment {
-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"name": name},
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"name": name,
-					},
-				},
-				Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080),
-			},
-			Replicas: &replicas,
-		},
-	}
+	d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.ObjectMeta.Namespace = namespace
+	d.Spec.Template.Spec = prometheusExporterPodSpec(CustomMetricName, metricValue, 8080)
+	return d
 }

 func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) v1.PodSpec {
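
The exporter constructors take a different tack from the earlier call sites: instead of patching individual container fields, they swap in a fully prepared pod spec with `d.Spec.Template.Spec = podSpec`. That is safe because the assignment replaces only the pod spec, not the template's `ObjectMeta`, so the selector the helper derived from `map[string]string{"name": name}` still matches the template labels.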

@ -68,39 +68,17 @@ func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6
 	if isIPV6 {
 		args = append(args, "-V")
 	}
-	deploymentSpec := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   "iperf2-server-deployment",
-			Labels: labels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: labels,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labels,
-				},
-				Spec: v1.PodSpec{
-					TerminationGracePeriodSeconds: &one,
-					Containers: []v1.Container{
-						{
-							Name:    "iperf2-server",
-							Image:   imageutils.GetE2EImage(imageutils.Agnhost),
-							Command: []string{"iperf"},
-							Args:    args,
-							Ports: []v1.ContainerPort{
+	deploymentSpec := e2edeployment.NewDeployment(
+		"iperf2-server-deployment", replicas, labels, "iperf2-server",
+		imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RollingUpdateDeploymentStrategyType)
+	deploymentSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
+	deploymentSpec.Spec.Template.Spec.Containers[0].Command = []string{"iperf"}
+	deploymentSpec.Spec.Template.Spec.Containers[0].Args = args
+	deploymentSpec.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{
 		{
 			ContainerPort: iperf2Port,
 			Protocol:      v1.ProtocolTCP,
 		},
-							},
-						},
-					},
-				},
-			},
-		},
 	}

 	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})

@ -32,6 +32,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 )
@ -445,24 +446,11 @@ func generateScaleTestServiceSpec(suffix string) *v1.Service {
 }

 func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deployment {
-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: scaleTestBackendName,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &numReplicas,
-			Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: scaleTestLabels,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  scaleTestBackendName,
-							Image: imageutils.GetE2EImage(imageutils.EchoServer),
-							Ports: []v1.ContainerPort{{ContainerPort: 8080}},
-							ReadinessProbe: &v1.Probe{
+	d := e2edeployment.NewDeployment(
+		scaleTestBackendName, numReplicas, scaleTestLabels, scaleTestBackendName,
+		imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 8080}}
+	d.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
 		ProbeHandler: v1.ProbeHandler{
 			HTTPGet: &v1.HTTPGetAction{
 				Port: intstr.FromInt(8080),
@ -473,11 +461,6 @@ func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deploymen
 		PeriodSeconds:    1,
 		SuccessThreshold: 1,
 		TimeoutSeconds:   1,
-						},
-					},
-				},
-			},
-		},
-	},
 	}
+	return d
 }