Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00

Merge remote-tracking branch 'origin/master'

commit 6de257e93f
@@ -77,7 +77,7 @@ dependencies:
       match: const etcdImage

   - name: "etcd-image"
-    version: 3.5.0
+    version: 3.5.1
     refPaths:
     - path: cluster/images/etcd/Makefile
       match: BUNDLED_ETCD_VERSIONS\?|LATEST_ETCD_VERSION\?
@@ -15,7 +15,7 @@
 # Build the etcd image
 #
 # Usage:
-# [BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
+# [BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
 #
 # The image contains different etcd versions to simplify
 # upgrades. Thus be careful when removing any versions from here.
@@ -26,15 +26,15 @@
 # Except from etcd-$(version) and etcdctl-$(version) binaries, we also
 # need etcd and etcdctl binaries for backward compatibility reasons.
 # That binary will be set to the last version from $(BUNDLED_ETCD_VERSIONS).
-BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0
+BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1

 # LATEST_ETCD_VERSION identifies the most recent etcd version available.
-LATEST_ETCD_VERSION?=3.5.0
+LATEST_ETCD_VERSION?=3.5.1

 # REVISION provides a version number for this image and all it's bundled
 # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment
 # for each revision of this image at that etcd version.
-REVISION?=4
+REVISION?=0

 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "<etcd-version>-<revision>".
 IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION)
@@ -28,7 +28,7 @@ import (
 )

 var (
-	supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.0"}
+	supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.1"}
 )

 const (
@@ -552,7 +552,6 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
 		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
-		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
 	)
 	if err != nil {
 		return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
@@ -565,7 +564,6 @@ func startPVProtectionController(ctx context.Context, controllerContext Controll
 	go pvprotection.NewPVProtectionController(
 		controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
 		controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
-		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
 	).Run(ctx, 1)
 	return nil, true, nil
 }
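Note: with StorageObjectInUseProtection no longer gated, the feature-gate argument disappears from both constructors. A minimal sketch of the new wiring, using only names visible in the hunks above (controllerContext and ctx come from the surrounding kube-controller-manager code; the Run(ctx, 1) call on the PVC controller mirrors the PV controller's call and is an assumption here):

	// Sketch, assuming the variables from the hunks above.
	pvcCtrl, err := pvcprotection.NewPVCProtectionController(
		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
		controllerContext.InformerFactory.Core().V1().Pods(),
		controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
	)
	if err != nil {
		return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
	}
	go pvcCtrl.Run(ctx, 1) // assumed worker count, matching the PV controller call below

	go pvprotection.NewPVProtectionController(
		controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
		controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
	).Run(ctx, 1)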
@@ -23,12 +23,9 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/scheme"
 	ref "k8s.io/client-go/tools/reference"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume/util"
 )

@@ -1226,29 +1223,24 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
 	}

 	satisfyingTestCases := map[string]struct {
-		isExpectedMatch                    bool
-		vol                                *v1.PersistentVolume
-		pvc                                *v1.PersistentVolumeClaim
-		enableStorageObjectInUseProtection bool
+		isExpectedMatch bool
+		vol             *v1.PersistentVolume
+		pvc             *v1.PersistentVolumeClaim
 	}{
 		"pv deletionTimeStamp not set": {
-			isExpectedMatch:                    true,
-			vol:                                pv,
-			pvc:                                pvc,
-			enableStorageObjectInUseProtection: true,
+			isExpectedMatch: true,
+			vol:             pv,
+			pvc:             pvc,
 		},
 		"pv deletionTimeStamp set": {
-			isExpectedMatch:                    false,
-			vol:                                pvToDelete,
-			pvc:                                pvc,
-			enableStorageObjectInUseProtection: true,
+			isExpectedMatch: false,
+			vol:             pvToDelete,
+			pvc:             pvc,
 		},
 	}

 	for name, testCase := range satisfyingTestCases {
 		t.Run(name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
-
 			err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
 			// expected to match but got an error
 			if err != nil && testCase.isExpectedMatch {
@@ -1262,28 +1254,23 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
 	}

 	filteringTestCases := map[string]struct {
-		isExpectedMatch                    bool
-		vol                                persistentVolumeOrderedIndex
-		pvc                                *v1.PersistentVolumeClaim
-		enableStorageObjectInUseProtection bool
+		isExpectedMatch bool
+		vol             persistentVolumeOrderedIndex
+		pvc             *v1.PersistentVolumeClaim
 	}{
 		"pv deletionTimeStamp not set": {
-			isExpectedMatch:                    true,
-			vol:                                createTestVolOrderedIndex(pv),
-			pvc:                                pvc,
-			enableStorageObjectInUseProtection: true,
+			isExpectedMatch: true,
+			vol:             createTestVolOrderedIndex(pv),
+			pvc:             pvc,
 		},
 		"pv deletionTimeStamp set": {
-			isExpectedMatch:                    false,
-			vol:                                createTestVolOrderedIndex(pvToDelete),
-			pvc:                                pvc,
-			enableStorageObjectInUseProtection: true,
+			isExpectedMatch: false,
+			vol:             createTestVolOrderedIndex(pvToDelete),
+			pvc:             pvc,
 		},
 	}
 	for name, testCase := range filteringTestCases {
 		t.Run(name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
-
 			pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
 			// expected to match but either got an error or no returned pvmatch
 			if pvmatch == nil && testCase.isExpectedMatch {
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	corelisters "k8s.io/client-go/listers/core/v1"
@@ -45,7 +44,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller/volume/events"
 	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
 	pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-	"k8s.io/kubernetes/pkg/features"
 	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	"k8s.io/kubernetes/pkg/util/goroutinemap"
 	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@@ -275,10 +273,8 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
 	requestedSize := requestedQty.Value()

 	// check if PV's DeletionTimeStamp is set, if so, return error.
-	if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
-		if volume.ObjectMeta.DeletionTimestamp != nil {
-			return fmt.Errorf("the volume is marked for deletion %q", volume.Name)
-		}
-	}
+	if volume.ObjectMeta.DeletionTimestamp != nil {
+		return fmt.Errorf("the volume is marked for deletion %q", volume.Name)
+	}

 	volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
@@ -25,12 +25,10 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/scheme"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
 	"k8s.io/client-go/tools/reference"
 	storagehelpers "k8s.io/component-helpers/storage/volume"
-	"k8s.io/kubernetes/pkg/features"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -227,10 +225,8 @@ func FindMatchingVolume(
 		}

 		// check if PV's DeletionTimeStamp is set, if so, skip this volume.
-		if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
-			if volume.ObjectMeta.DeletionTimestamp != nil {
-				continue
-			}
-		}
+		if volume.ObjectMeta.DeletionTimestamp != nil {
+			continue
+		}

 		nodeAffinityValid := true
@@ -53,17 +53,13 @@ type Controller struct {
 	podIndexer cache.Indexer

 	queue workqueue.RateLimitingInterface
-
-	// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
-	storageObjectInUseProtectionEnabled bool
 }

 // NewPVCProtectionController returns a new instance of PVCProtectionController.
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) (*Controller, error) {
+func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
 	e := &Controller{
-		client:                              cl,
-		queue:                               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
-		storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
+		client: cl,
+		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
 	}
 	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@@ -189,10 +185,6 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin
 }

 func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeClaim) error {
-	// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
-	if !c.storageObjectInUseProtectionEnabled {
-		return nil
-	}
 	claimClone := pvc.DeepCopy()
 	claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
 	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
@ -185,31 +185,22 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
deletedPod *v1.Pod
|
||||
// List of expected kubeclient actions that should happen during the
|
||||
// test.
|
||||
expectedActions []clienttesting.Action
|
||||
storageObjectInUseProtectionEnabled bool
|
||||
expectedActions []clienttesting.Action
|
||||
}{
|
||||
//
|
||||
// PVC events
|
||||
//
|
||||
{
|
||||
name: "StorageObjectInUseProtection Enabled, PVC without finalizer -> finalizer is added",
|
||||
name: "PVC without finalizer -> finalizer is added",
|
||||
updatedPVC: pvc(),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Disabled, PVC without finalizer -> finalizer is not added",
|
||||
updatedPVC: pvc(),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: false,
|
||||
},
|
||||
{
|
||||
name: "PVC with finalizer -> no action",
|
||||
updatedPVC: withProtectionFinalizer(pvc()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
name: "PVC with finalizer -> no action",
|
||||
updatedPVC: withProtectionFinalizer(pvc()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "saving PVC finalizer fails -> controller retries",
|
||||
@ -229,25 +220,14 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
// This succeeds
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Enabled, deleted PVC with finalizer -> finalizer is removed",
|
||||
name: "deleted PVC with finalizer -> finalizer is removed",
|
||||
updatedPVC: deleted(withProtectionFinalizer(pvc())),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Disabled, deleted PVC with finalizer -> finalizer is removed",
|
||||
updatedPVC: deleted(withProtectionFinalizer(pvc())),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: false,
|
||||
},
|
||||
{
|
||||
name: "finalizer removal fails -> controller retries",
|
||||
@ -270,7 +250,6 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
// Succeeds
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "deleted PVC with finalizer + pod with the PVC exists -> finalizer is not removed",
|
||||
@ -290,16 +269,14 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "deleted PVC with finalizer + pod with the PVC finished but is not deleted -> finalizer is not removed",
|
||||
initialObjects: []runtime.Object{
|
||||
withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())),
|
||||
},
|
||||
updatedPVC: deleted(withProtectionFinalizer(pvc())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
updatedPVC: deleted(withProtectionFinalizer(pvc())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "deleted PVC with finalizer + pod with the PVC exists but is not in the Informer's cache yet -> finalizer is not removed",
|
||||
@ -311,7 +288,6 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
//
|
||||
// Pod events
|
||||
@ -321,18 +297,16 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
initialObjects: []runtime.Object{
|
||||
deleted(withProtectionFinalizer(pvc())),
|
||||
},
|
||||
updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "updated finished Pod -> finalizer is not removed",
|
||||
initialObjects: []runtime.Object{
|
||||
deleted(withProtectionFinalizer(pvc())),
|
||||
},
|
||||
updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "updated unscheduled Pod -> finalizer is removed",
|
||||
@ -344,7 +318,6 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "deleted running Pod -> finalizer is removed",
|
||||
@ -356,7 +329,6 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "pod delete and create with same namespaced name seen as an update, old pod used deleted PVC -> finalizer is removed",
|
||||
@ -369,37 +341,33 @@ func TestPVCProtectionController(t *testing.T) {
|
||||
clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}),
|
||||
clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "pod delete and create with same namespaced name seen as an update, old pod used non-deleted PVC -> finalizer is not removed",
|
||||
initialObjects: []runtime.Object{
|
||||
withProtectionFinalizer(pvc()),
|
||||
},
|
||||
deletedPod: withPVC(defaultPVCName, pod()),
|
||||
updatedPod: withUID("uid2", pod()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
deletedPod: withPVC(defaultPVCName, pod()),
|
||||
updatedPod: withUID("uid2", pod()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "pod delete and create with same namespaced name seen as an update, both pods reference deleted PVC -> finalizer is not removed",
|
||||
initialObjects: []runtime.Object{
|
||||
deleted(withProtectionFinalizer(pvc())),
|
||||
},
|
||||
deletedPod: withPVC(defaultPVCName, pod()),
|
||||
updatedPod: withUID("uid2", withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
deletedPod: withPVC(defaultPVCName, pod()),
|
||||
updatedPod: withUID("uid2", withPVC(defaultPVCName, pod())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "pod update from unscheduled to scheduled, deleted PVC is referenced -> finalizer is not removed",
|
||||
initialObjects: []runtime.Object{
|
||||
deleted(withProtectionFinalizer(pvc())),
|
||||
},
|
||||
deletedPod: unscheduled(withPVC(defaultPVCName, pod())),
|
||||
updatedPod: withPVC(defaultPVCName, pod()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
deletedPod: unscheduled(withPVC(defaultPVCName, pod())),
|
||||
updatedPod: withPVC(defaultPVCName, pod()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -431,7 +399,7 @@ func TestPVCProtectionController(t *testing.T) {
 		podInformer := informers.Core().V1().Pods()

 		// Create the controller
-		ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)
+		ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -47,17 +47,13 @@ type Controller struct {
 	pvListerSynced cache.InformerSynced

 	queue workqueue.RateLimitingInterface
-
-	// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
-	storageObjectInUseProtectionEnabled bool
 }

 // NewPVProtectionController returns a new *Controller.
-func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
+func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
 	e := &Controller{
-		client:                              cl,
-		queue:                               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
-		storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
+		client: cl,
+		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
 	}
 	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolume_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@@ -158,10 +154,6 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error {
 }

 func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
-	// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
-	if !c.storageObjectInUseProtectionEnabled {
-		return nil
-	}
 	pvClone := pv.DeepCopy()
 	pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
 	_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
@ -25,7 +25,7 @@ import (
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -112,30 +112,21 @@ func TestPVProtectionController(t *testing.T) {
|
||||
updatedPV *v1.PersistentVolume
|
||||
// List of expected kubeclient actions that should happen during the
|
||||
// test.
|
||||
expectedActions []clienttesting.Action
|
||||
storageObjectInUseProtectionEnabled bool
|
||||
expectedActions []clienttesting.Action
|
||||
}{
|
||||
// PV events
|
||||
//
|
||||
{
|
||||
name: "StorageObjectInUseProtection Enabled, PV without finalizer -> finalizer is added",
|
||||
name: "PV without finalizer -> finalizer is added",
|
||||
updatedPV: pv(),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Disabled, PV without finalizer -> finalizer is added",
|
||||
updatedPV: pv(),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: false,
|
||||
},
|
||||
{
|
||||
name: "PVC with finalizer -> no action",
|
||||
updatedPV: withProtectionFinalizer(pv()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
name: "PVC with finalizer -> no action",
|
||||
updatedPV: withProtectionFinalizer(pv()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "saving PVC finalizer fails -> controller retries",
|
||||
@ -155,23 +146,13 @@ func TestPVProtectionController(t *testing.T) {
|
||||
// This succeeds
|
||||
clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Enabled, deleted PV with finalizer -> finalizer is removed",
|
||||
name: "deleted PV with finalizer -> finalizer is removed",
|
||||
updatedPV: deleted(withProtectionFinalizer(pv())),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "StorageObjectInUseProtection Disabled, deleted PV with finalizer -> finalizer is removed",
|
||||
updatedPV: deleted(withProtectionFinalizer(pv())),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: false,
|
||||
},
|
||||
{
|
||||
name: "finalizer removal fails -> controller retries",
|
||||
@ -191,13 +172,11 @@ func TestPVProtectionController(t *testing.T) {
|
||||
// Succeeds
|
||||
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
|
||||
updatedPV: deleted(withProtectionFinalizer(boundPV())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
name: "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
|
||||
updatedPV: deleted(withProtectionFinalizer(boundPV())),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -231,7 +210,7 @@ func TestPVProtectionController(t *testing.T) {
 		}

 		// Create the controller
-		ctrl := NewPVProtectionController(pvInformer, client, test.storageObjectInUseProtectionEnabled)
+		ctrl := NewPVProtectionController(pvInformer, client)

 		// Start the test by simulating an event
 		if test.updatedPV != nil {
@ -570,18 +570,16 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
|
||||
return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
|
||||
// Pods that uses a PVC that is being deleted must not be started.
|
||||
//
|
||||
// In case an old kubelet is running without this check or some kubelets
|
||||
// have this feature disabled, the worst that can happen is that such
|
||||
// pod is scheduled. This was the default behavior in 1.8 and earlier
|
||||
// and users should not be that surprised.
|
||||
// It should happen only in very rare case when scheduler schedules
|
||||
// a pod and user deletes a PVC that's used by it at the same time.
|
||||
if pvc.ObjectMeta.DeletionTimestamp != nil {
|
||||
return nil, errors.New("PVC is being deleted")
|
||||
}
|
||||
// Pods that uses a PVC that is being deleted must not be started.
|
||||
//
|
||||
// In case an old kubelet is running without this check or some kubelets
|
||||
// have this feature disabled, the worst that can happen is that such
|
||||
// pod is scheduled. This was the default behavior in 1.8 and earlier
|
||||
// and users should not be that surprised.
|
||||
// It should happen only in very rare case when scheduler schedules
|
||||
// a pod and user deletes a PVC that's used by it at the same time.
|
||||
if pvc.ObjectMeta.DeletionTimestamp != nil {
|
||||
return nil, errors.New("PVC is being deleted")
|
||||
}
|
||||
|
||||
if pvc.Status.Phase != v1.ClaimBound {
|
||||
|
@ -261,10 +261,9 @@ func NewVolumeBinder(
|
||||
// returned.
|
||||
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
|
||||
podVolumes = &PodVolumes{}
|
||||
podName := getPodName(pod)
|
||||
|
||||
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
|
||||
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
|
||||
klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
|
||||
// Initialize to true for pods that don't have volumes. These
|
||||
// booleans get translated into reason strings when the function
|
||||
@ -316,7 +315,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
|
||||
|
||||
// Check PV node affinity on bound volumes
|
||||
if len(boundClaims) > 0 {
|
||||
boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, podName)
|
||||
boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -372,9 +371,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
|
||||
// 2. Update the pvcCache with the new PVCs with annotations set
|
||||
// 3. Update PodVolumes again with cached API updates for PVs and PVCs.
|
||||
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) {
|
||||
podName := getPodName(assumedPod)
|
||||
|
||||
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
|
||||
klog.V(4).InfoS("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
|
||||
@ -382,7 +379,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod
|
||||
}()
|
||||
|
||||
if allBound := b.arePodVolumesBound(assumedPod); allBound {
|
||||
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
|
||||
klog.V(4).InfoS("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -390,14 +387,15 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod
|
||||
newBindings := []*BindingInfo{}
|
||||
for _, binding := range podVolumes.StaticBindings {
|
||||
newPV, dirty, err := pvutil.GetBindVolumeToClaim(binding.pv, binding.pvc)
|
||||
klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||
podName,
|
||||
binding.pv.Name,
|
||||
binding.pvc.Name,
|
||||
newPV,
|
||||
dirty,
|
||||
err)
|
||||
klog.V(5).InfoS("AssumePodVolumes: GetBindVolumeToClaim",
|
||||
"pod", klog.KObj(assumedPod),
|
||||
"PV", klog.KObj(binding.pv),
|
||||
"PVC", klog.KObj(binding.pvc),
|
||||
"newPV", klog.KObj(newPV),
|
||||
"dirty", dirty,
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "AssumePodVolumes: fail to GetBindVolumeToClaim")
|
||||
b.revertAssumedPVs(newBindings)
|
||||
return false, err
|
||||
}
|
||||
@ -444,8 +442,7 @@ func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
|
||||
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
|
||||
// by the PV controller.
|
||||
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) {
|
||||
podName := getPodName(assumedPod)
|
||||
klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
|
||||
klog.V(4).InfoS("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName))
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
@ -457,7 +454,7 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes
|
||||
claimsToProvision := podVolumes.DynamicProvisions
|
||||
|
||||
// Start API operations
|
||||
err = b.bindAPIUpdate(podName, bindings, claimsToProvision)
|
||||
err = b.bindAPIUpdate(assumedPod, bindings, claimsToProvision)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -481,7 +478,8 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
|
||||
}
|
||||
|
||||
// bindAPIUpdate makes the API update for those PVs/PVCs.
|
||||
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
|
||||
func (b *volumeBinder) bindAPIUpdate(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
|
||||
podName := getPodName(pod)
|
||||
if bindings == nil {
|
||||
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
|
||||
}
|
||||
@ -511,16 +509,15 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl
|
||||
// Do the actual prebinding. Let the PV controller take care of the rest
|
||||
// There is no API rollback if the actual binding fails
|
||||
for _, binding = range bindings {
|
||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
|
||||
klog.V(5).InfoS("bindAPIUpdate: binding PV to PVC", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
|
||||
// TODO: does it hurt if we make an api call and nothing needs to be updated?
|
||||
claimKey := getPVCName(binding.pvc)
|
||||
klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
|
||||
klog.V(2).InfoS("Claim bound to volume", "PVC", klog.KObj(binding.pvc), "PV", klog.KObj(binding.pv))
|
||||
newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
|
||||
klog.V(4).InfoS("Updating PersistentVolume: binding to claim failed", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err)
|
||||
return err
|
||||
}
|
||||
klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
|
||||
klog.V(4).InfoS("Updating PersistentVolume: bound to claim", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
|
||||
// Save updated object from apiserver for later checking.
|
||||
binding.pv = newPV
|
||||
lastProcessedBinding++
|
||||
@ -529,7 +526,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl
|
||||
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
|
||||
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
|
||||
for i, claim = range claimsToProvision {
|
||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
|
||||
klog.V(5).InfoS("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim))
|
||||
newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
@ -573,7 +570,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
|
||||
csiNode, err := b.csiNodeLister.Get(node.Name)
|
||||
if err != nil {
|
||||
// TODO: return the error once CSINode is created by default
|
||||
klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err)
|
||||
klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
|
||||
}
|
||||
|
||||
// Check for any conditions that might require scheduling retry
|
||||
@ -585,7 +582,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("pod does not exist any more: %w", err)
|
||||
}
|
||||
klog.Errorf("failed to get pod %s/%s from the lister: %v", pod.Namespace, pod.Name, err)
|
||||
klog.ErrorS(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
for _, binding := range bindings {
|
||||
@ -681,7 +678,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
|
||||
}
|
||||
|
||||
// All pvs and pvcs that we operated on are bound
|
||||
klog.V(4).Infof("All PVCs for pod %q are bound", podName)
|
||||
klog.V(4).InfoS("All PVCs for pod are bound", "pod", klog.KObj(pod))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -725,12 +722,12 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste

 	fullyBound := b.isPVCFullyBound(pvc)
 	if fullyBound {
-		klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
+		klog.V(5).InfoS("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
 	} else {
 		if pvc.Spec.VolumeName != "" {
-			klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
+			klog.V(5).InfoS("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
 		} else {
-			klog.V(5).Infof("PVC %q is not bound", pvcKey)
+			klog.V(5).InfoS("PVC is not bound", "PVC", klog.KObj(pvc))
 		}
 	}
 	return fullyBound, pvc, nil
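Note: the logging changes throughout the binder follow the structured-logging migration: klog.V(n).Infof format strings become klog.V(n).InfoS with a constant message plus key/value pairs, using klog.KObj for objects and klog.KRef where only a name is available. A minimal self-contained sketch (the pvc value is hypothetical, not taken from this patch):

	package main

	import (
		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/klog/v2"
	)

	func logBinding(pvc *v1.PersistentVolumeClaim) {
		// Old style: klog.V(5).Infof("PVC %q is fully bound to PV %q", key, pvc.Spec.VolumeName)
		// New style: constant message plus structured key/value pairs.
		klog.V(5).InfoS("PVC is fully bound to PV",
			"PVC", klog.KObj(pvc), // namespace/name of the claim
			"PV", klog.KRef("", pvc.Spec.VolumeName)) // PVs are cluster-scoped, so the namespace is empty
	}

	func main() {
		pvc := &v1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "data"},
			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "pv-1"},
		}
		logBinding(pvc)
	}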
@ -787,11 +784,11 @@ func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
|
||||
return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
|
||||
}
|
||||
|
||||
func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, bool, error) {
|
||||
func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
|
||||
csiNode, err := b.csiNodeLister.Get(node.Name)
|
||||
if err != nil {
|
||||
// TODO: return the error once CSINode is created by default
|
||||
klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err)
|
||||
klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
|
||||
}
|
||||
|
||||
for _, pvc := range claims {
|
||||
@ -811,20 +808,19 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
|
||||
|
||||
err = volumeutil.CheckNodeAffinity(pv, node.Labels)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
|
||||
klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
|
||||
return false, true, nil
|
||||
}
|
||||
klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
|
||||
klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
|
||||
klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
// findMatchingVolumes tries to find matching volumes for given claims,
|
||||
// and return unbound claims for further provision.
|
||||
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
|
||||
podName := getPodName(pod)
|
||||
// Sort all the claims by increasing size request to get the smallest fits
|
||||
sort.Sort(byPVCSize(claimsToBind))
|
||||
|
||||
@ -836,7 +832,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
|
||||
// Get storage class name from each PVC
|
||||
storageClassName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
|
||||
allPVs := b.pvCache.ListPVs(storageClassName)
|
||||
pvcName := getPVCName(pvc)
|
||||
|
||||
// Find a matching PV
|
||||
pv, err := pvutil.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
|
||||
@ -844,7 +839,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
|
||||
return false, nil, nil, err
|
||||
}
|
||||
if pv == nil {
|
||||
klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
|
||||
klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node))
|
||||
unboundClaims = append(unboundClaims, pvc)
|
||||
foundMatches = false
|
||||
continue
|
||||
@ -853,11 +848,11 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
|
||||
// matching PV needs to be excluded so we don't select it again
|
||||
chosenPVs[pv.Name] = pv
|
||||
bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc})
|
||||
klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName)
|
||||
klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
if foundMatches {
|
||||
klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
|
||||
klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
}
|
||||
|
||||
return
|
||||
@ -867,7 +862,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
|
||||
// findMatchingVolumes, and do not have matching volumes for binding), and return true
|
||||
// if all of the claims are eligible for dynamic provision.
|
||||
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) {
|
||||
podName := getPodName(pod)
|
||||
dynamicProvisions = []*v1.PersistentVolumeClaim{}
|
||||
|
||||
// We return early with provisionedClaims == nil if a check
|
||||
@ -885,13 +879,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
||||
}
|
||||
provisioner := class.Provisioner
|
||||
if provisioner == "" || provisioner == pvutil.NotSupportedProvisioner {
|
||||
klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
|
||||
klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim))
|
||||
return false, true, nil, nil
|
||||
}
|
||||
|
||||
// Check if the node can satisfy the topology requirement in the class
|
||||
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
|
||||
klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
|
||||
klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim))
|
||||
return false, true, nil, nil
|
||||
}
|
||||
|
||||
@ -908,7 +902,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
||||
dynamicProvisions = append(dynamicProvisions, claim)
|
||||
|
||||
}
|
||||
klog.V(4).Infof("Provisioning for %d claims of pod %q that has no matching volumes on node %q ...", len(claimsToProvision), podName, node.Name)
|
||||
klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
|
||||
return true, true, dynamicProvisions, nil
|
||||
}
|
||||
@ -974,8 +968,8 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten
|
||||
|
||||
// TODO (?): this doesn't give any information about which pools where considered and why
|
||||
// they had to be rejected. Log that above? But that might be a lot of log output...
|
||||
klog.V(4).Infof("Node %q has no accessible CSIStorageCapacity with enough capacity for PVC %s/%s of size %d and storage class %q",
|
||||
node.Name, claim.Namespace, claim.Name, sizeInBytes, storageClass.Name)
|
||||
klog.V(4).InfoS("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
|
||||
"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@ -997,7 +991,7 @@ func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1beta1.CSI
|
||||
selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology)
|
||||
if err != nil {
|
||||
// This should never happen because NodeTopology must be valid.
|
||||
klog.Errorf("unexpected error converting %+v to a label selector: %v", capacity.NodeTopology, err)
|
||||
klog.ErrorS(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology)
|
||||
return false
|
||||
}
|
||||
return selector.Matches(labels.Set(node.Labels))
|
||||
|
@ -19,6 +19,7 @@ package volumebinding
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
@ -187,7 +188,8 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}, csiStorageCapacity ...b
|
||||
informerFactory.Start(stopCh)
|
||||
for v, synced := range informerFactory.WaitForCacheSync(stopCh) {
|
||||
if !synced {
|
||||
klog.Fatalf("Error syncing informer for %v", v)
|
||||
klog.ErrorS(nil, "Error syncing informer", "informer", v)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1528,7 +1530,7 @@ func TestBindAPIUpdate(t *testing.T) {
|
||||
testEnv.assumeVolumes(t, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Execute
|
||||
err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
|
||||
err := testEnv.internalBinder.bindAPIUpdate(pod, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
@ -2069,7 +2071,7 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
go func(scenario scenarioType) {
|
||||
time.Sleep(5 * time.Second)
|
||||
// Sleep a while to run after bindAPIUpdate in BindPodVolumes
|
||||
klog.V(5).Infof("Running delay function")
|
||||
klog.V(5).InfoS("Running delay function")
|
||||
scenario.delayFunc(t, testEnv, pod, scenario.initPVs, scenario.initPVCs)
|
||||
}(scenario)
|
||||
}
|
||||
|
@@ -21,11 +21,8 @@ import (
 	"io"

 	"k8s.io/apiserver/pkg/admission"
-	"k8s.io/apiserver/pkg/admission/initializer"
-	"k8s.io/component-base/featuregate"
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@ -45,12 +42,9 @@ func Register(plugins *admission.Plugins) {
|
||||
// storageProtectionPlugin holds state for and implements the admission plugin.
|
||||
type storageProtectionPlugin struct {
|
||||
*admission.Handler
|
||||
|
||||
storageObjectInUseProtection bool
|
||||
}
|
||||
|
||||
var _ admission.Interface = &storageProtectionPlugin{}
|
||||
var _ initializer.WantsFeatures = &storageProtectionPlugin{}
|
||||
|
||||
// newPlugin creates a new admission plugin.
|
||||
func newPlugin() *storageProtectionPlugin {
|
||||
@@ -70,10 +64,6 @@ var (
 // This prevents users from deleting a PVC that's used by a running pod.
 // This also prevents admin from deleting a PV that's bound by a PVC
 func (c *storageProtectionPlugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
-	if !c.storageObjectInUseProtection {
-		return nil
-	}
-
 	switch a.GetResource().GroupResource() {
 	case pvResource:
 		return c.admitPV(a)
@ -129,11 +119,3 @@ func (c *storageProtectionPlugin) admitPVC(a admission.Attributes) error {
|
||||
pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *storageProtectionPlugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
|
||||
c.storageObjectInUseProtection = featureGates.Enabled(features.StorageObjectInUseProtection)
|
||||
}
|
||||
|
||||
func (c *storageProtectionPlugin) ValidateInitialization() error {
|
||||
return nil
|
||||
}
|
||||
|
@ -61,7 +61,6 @@ func TestAdmit(t *testing.T) {
|
||||
resource schema.GroupVersionResource
|
||||
object runtime.Object
|
||||
expectedObject runtime.Object
|
||||
featureEnabled bool
|
||||
namespace string
|
||||
}{
|
||||
{
|
||||
@ -69,7 +68,6 @@ func TestAdmit(t *testing.T) {
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
|
||||
claim,
|
||||
claimWithFinalizer,
|
||||
true,
|
||||
claim.Namespace,
|
||||
},
|
||||
{
|
||||
@ -77,23 +75,13 @@ func TestAdmit(t *testing.T) {
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
|
||||
claimWithFinalizer,
|
||||
claimWithFinalizer,
|
||||
true,
|
||||
claimWithFinalizer.Namespace,
|
||||
},
|
||||
{
|
||||
"disabled feature -> no finalizer",
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
|
||||
claim,
|
||||
claim,
|
||||
false,
|
||||
claim.Namespace,
|
||||
},
|
||||
{
|
||||
"create -> add finalizer",
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumes"),
|
||||
pv,
|
||||
pvWithFinalizer,
|
||||
true,
|
||||
pv.Namespace,
|
||||
},
|
||||
{
|
||||
@ -101,23 +89,13 @@ func TestAdmit(t *testing.T) {
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumes"),
|
||||
pvWithFinalizer,
|
||||
pvWithFinalizer,
|
||||
true,
|
||||
pvWithFinalizer.Namespace,
|
||||
},
|
||||
{
|
||||
"disabled feature -> no finalizer",
|
||||
api.SchemeGroupVersion.WithResource("persistentvolumes"),
|
||||
pv,
|
||||
pv,
|
||||
false,
|
||||
pv.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctrl := newPlugin()
|
||||
ctrl.storageObjectInUseProtection = test.featureEnabled
|
||||
|
||||
obj := test.object.DeepCopyObject()
|
||||
attrs := admission.NewAttributesRecord(
|
||||
|
@ -202,7 +202,6 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
|
||||
etcdImage := imageutils.GetE2EImage(imageutils.Etcd)
|
||||
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
etcdLocalhostAddress := "127.0.0.1"
|
||||
if framework.TestContext.ClusterIsIPv6() {
|
||||
etcdLocalhostAddress = "::1"
|
||||
@ -250,31 +249,10 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
|
||||
},
|
||||
},
|
||||
}
|
||||
d := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Strategy: appsv1.DeploymentStrategy{
|
||||
Type: appsv1.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: containers,
|
||||
Volumes: volumes,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Template.Spec.Containers = containers
|
||||
d.Spec.Template.Spec.Volumes = volumes
|
||||
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
|
||||
|
||||
|
@ -266,7 +266,6 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
|
||||
// Create the deployment of the webhook
|
||||
podLabels := map[string]string{"app": "sample-crd-conversion-webhook", "crd-webhook": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
mounts := []v1.VolumeMount{
|
||||
{
|
||||
Name: "crd-conversion-webhook-certs",
|
||||
@ -311,31 +310,10 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: containerPort}},
|
||||
},
|
||||
}
|
||||
d := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentCRDName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Strategy: appsv1.DeploymentStrategy{
|
||||
Type: appsv1.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: containers,
|
||||
Volumes: volumes,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
d := e2edeployment.NewDeployment(deploymentCRDName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Template.Spec.Containers = containers
|
||||
d.Spec.Template.Spec.Volumes = volumes
|
||||
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
|
||||
|
||||
|
@@ -39,6 +39,7 @@ import (
 	"k8s.io/apiserver/pkg/storage/names"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -126,20 +127,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
 }

 func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment {
-	replicas := int32(2)
-	return &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: deploymentName,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{MatchLabels: labels},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: getPodTemplateSpec(labels),
-		},
-	}
+	return e2edeployment.NewDeployment(deploymentName, 2, labels, "nginx", imageutils.GetE2EImage(imageutils.Nginx), appsv1.RollingUpdateDeploymentStrategyType)
 }

 func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
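Note: several hand-built Deployment literals in the e2e tests are replaced by the shared e2edeployment.NewDeployment helper. A short sketch of the pattern, assuming the argument order implied by the call sites in this diff (name, replicas, pod labels, container name, image, strategy) and hypothetical client/namespace variables from the surrounding test context:

	// Sketch only: build a deployment with the shared e2e helper, then
	// adjust fields on the returned *appsv1.Deployment before creating it,
	// as the replaced call sites in this patch do.
	labels := map[string]string{"app": "sample"}
	d := e2edeployment.NewDeployment("sample", int32(2), labels,
		"nginx", imageutils.GetE2EImage(imageutils.Nginx),
		appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = new(int64) // pointer to 0
	_, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err, "creating deployment %s in namespace %s", "sample", namespace)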
@ -767,7 +767,6 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
	// Create the deployment of the webhook
	podLabels := map[string]string{"app": "sample-webhook", "webhook": "true"}
	replicas := int32(1)
	zero := int64(0)
	mounts := []v1.VolumeMount{
		{
			Name: "webhook-certs",

@ -812,31 +811,10 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
			Ports: []v1.ContainerPort{{ContainerPort: containerPort}},
		},
	}
	d := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: deploymentName,
			Labels: podLabels,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: podLabels,
			},
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RollingUpdateDeploymentStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: containers,
					Volumes: volumes,
				},
			},
		},
	}
	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Template.Spec.Containers = containers
	d.Spec.Template.Spec.Volumes = volumes

	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
	ginkgo.By("Wait for the deployment to be ready")

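Worth noting about the webhook hunk above: the container name and image are passed to the helper as empty strings, and the generated pod spec fields are then overwritten wholesale. Under the helper behaviour sketched earlier, the placeholder container is simply discarded, while the selector, labels, and strategy the helper produced are kept. A short fragment restating the pattern with the intent spelled out in comments (identifiers are the ones from the hunk above; this is an illustration, not additional changed code):

	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Template.Spec.Containers = containers // replaces the empty placeholder container entirely
	d.Spec.Template.Spec.Volumes = volumes       // attach the webhook certificate volumes built earlier in the function
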
@ -192,9 +192,6 @@ var _ = SIGDescribe("Deployment", func() {
		testDeploymentNoReplicas := int32(0)
		testDeploymentLabels := map[string]string{"test-deployment-static": "true"}
		testDeploymentLabelsFlat := "test-deployment-static=true"
		testDeploymentLabelSelectors := metav1.LabelSelector{
			MatchLabels: testDeploymentLabels,
		}
		w := &cache.ListWatch{
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = testDeploymentLabelsFlat

@ -205,29 +202,13 @@ var _ = SIGDescribe("Deployment", func() {
		framework.ExpectNoError(err, "failed to list Deployments")

		ginkgo.By("creating a Deployment")
		testDeployment := appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name: testDeploymentName,
				Labels: map[string]string{"test-deployment-static": "true"},
			},
			Spec: appsv1.DeploymentSpec{
				Replicas: &testDeploymentDefaultReplicas,
				Selector: &testDeploymentLabelSelectors,
				Template: v1.PodTemplateSpec{
					ObjectMeta: metav1.ObjectMeta{
						Labels: testDeploymentLabelSelectors.MatchLabels,
					},
					Spec: v1.PodSpec{
						TerminationGracePeriodSeconds: &one,
						Containers: []v1.Container{{
							Name: testDeploymentName,
							Image: testDeploymentInitialImage,
						}},
					},
				},
			},
		}
		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), &testDeployment, metav1.CreateOptions{})
		testDeployment := e2edeployment.NewDeployment(
			testDeploymentName, testDeploymentDefaultReplicas, testDeploymentLabels,
			testDeploymentName, testDeploymentInitialImage, appsv1.RollingUpdateDeploymentStrategyType)
		testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"}
		testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one

		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)

		ginkgo.By("waiting for Deployment to be created")

@ -57,6 +57,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
	testutils "k8s.io/kubernetes/test/utils"

@ -1101,35 +1102,14 @@ func generateBacksideHTTPSServiceSpec() *v1.Service {
}

func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "echoheaders-https",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
				"app": "echoheaders-https",
			}},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "echoheaders-https",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name: "echoheaders-https",
							Image: imageutils.GetE2EImage(imageutils.EchoServer),
							Ports: []v1.ContainerPort{{
								ContainerPort: 8443,
								Name: "echo-443",
							}},
						},
					},
				},
			},
		},
	}
	labels := map[string]string{"app": "echoheaders-https"}
	d := e2edeployment.NewDeployment("echoheaders-https", 0, labels, "echoheaders-https", imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Replicas = nil
	d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{
		ContainerPort: 8443,
		Name: "echo-443",
	}}
	return d
}

// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.

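One subtlety in the generateBacksideHTTPSDeploymentSpec hunk above: the old literal never set Spec.Replicas, while the helper takes an int32 and stores a pointer to it. Passing 0 and then clearing the field restores the original unset semantics, so the API server's default of one replica applies. A fragment restating that step (identifiers are those from the hunk; illustration only, under the same assumptions about the helper):

	d := e2edeployment.NewDeployment("echoheaders-https", 0, labels, "echoheaders-https", imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
	// A literal 0 would pin the Deployment to zero replicas; nil leaves the field
	// unset so the server-side default (1) is used, matching the old behaviour.
	d.Spec.Replicas = nil
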
@ -26,6 +26,7 @@ import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	imageutils "k8s.io/kubernetes/test/utils/image"

	gcm "google.golang.org/api/monitoring/v3"

@ -104,26 +105,10 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta
		podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue))
	}

	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"name": name},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"name": name,
					},
				},
				Spec: podSpec,
			},
			Replicas: &replicas,
		},
	}
	d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType)
	d.ObjectMeta.Namespace = namespace
	d.Spec.Template.Spec = podSpec
	return d
}

// StackdriverExporterPod is a Pod of simple application that exports a metric of fixed value to

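In the StackdriverExporterDeployment hunk above the entire pod spec is replaced with the podSpec assembled earlier in the function. Only the PodSpec is swapped; the template's ObjectMeta, and therefore the labels that have to match the Deployment selector, still come from the helper, which is presumably why no labels need to be reapplied. A fragment with that reasoning inlined (identifiers from the hunk; illustration only):

	d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType)
	d.ObjectMeta.Namespace = namespace // the monitoring fixtures are created in an explicit namespace
	// Replacing the PodSpec leaves Template.ObjectMeta untouched, so the
	// helper-generated "name" label continues to satisfy the selector.
	d.Spec.Template.Spec = podSpec
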
@ -188,26 +173,10 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName
// one exposing a metric in prometheus format and second a prometheus-to-sd container
// that scrapes the metric and pushes it to stackdriver.
func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"name": name},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"name": name,
					},
				},
				Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080),
			},
			Replicas: &replicas,
		},
	}
	d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType)
	d.ObjectMeta.Namespace = namespace
	d.Spec.Template.Spec = prometheusExporterPodSpec(CustomMetricName, metricValue, 8080)
	return d
}

func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) v1.PodSpec {

@ -68,38 +68,16 @@ func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6
	if isIPV6 {
		args = append(args, "-V")
	}
	deploymentSpec := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "iperf2-server-deployment",
			Labels: labels,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &one,
					Containers: []v1.Container{
						{
							Name: "iperf2-server",
							Image: imageutils.GetE2EImage(imageutils.Agnhost),
							Command: []string{"iperf"},
							Args: args,
							Ports: []v1.ContainerPort{
								{
									ContainerPort: iperf2Port,
									Protocol: v1.ProtocolTCP,
								},
							},
						},
					},
				},
			},
		},
	deploymentSpec := e2edeployment.NewDeployment(
		"iperf2-server-deployment", replicas, labels, "iperf2-server",
		imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RollingUpdateDeploymentStrategyType)
	deploymentSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
	deploymentSpec.Spec.Template.Spec.Containers[0].Command = []string{"iperf"}
	deploymentSpec.Spec.Template.Spec.Containers[0].Args = args
	deploymentSpec.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{
		{
			ContainerPort: iperf2Port,
			Protocol: v1.ProtocolTCP,
		},
	}

@ -32,6 +32,7 @@ import (
	imageutils "k8s.io/kubernetes/test/utils/image"

	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)

@ -445,39 +446,21 @@ func generateScaleTestServiceSpec(suffix string) *v1.Service {
}

func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: scaleTestBackendName,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &numReplicas,
			Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: scaleTestLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name: scaleTestBackendName,
							Image: imageutils.GetE2EImage(imageutils.EchoServer),
							Ports: []v1.ContainerPort{{ContainerPort: 8080}},
							ReadinessProbe: &v1.Probe{
								ProbeHandler: v1.ProbeHandler{
									HTTPGet: &v1.HTTPGetAction{
										Port: intstr.FromInt(8080),
										Path: "/healthz",
									},
								},
								FailureThreshold: 10,
								PeriodSeconds: 1,
								SuccessThreshold: 1,
								TimeoutSeconds: 1,
							},
						},
					},
				},
			},
	d := e2edeployment.NewDeployment(
		scaleTestBackendName, numReplicas, scaleTestLabels, scaleTestBackendName,
		imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 8080}}
	d.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			HTTPGet: &v1.HTTPGetAction{
				Port: intstr.FromInt(8080),
				Path: "/healthz",
			},
		},
		FailureThreshold: 10,
		PeriodSeconds: 1,
		SuccessThreshold: 1,
		TimeoutSeconds: 1,
	}
	return d
}