From 76d20c5bce83c3fd5cca3bbf4ba1bf0edd0f54aa Mon Sep 17 00:00:00 2001
From: Claudiu Belu
Date: Wed, 6 Oct 2021 15:38:21 +0300
Subject: [PATCH 1/4] tests: Use E2E framework deployments

Deployments can be created more easily with the NewDeployment helper
found in test/e2e/framework/deployment.
---
 test/e2e/apimachinery/aggregator.go           | 30 ++----------
 .../apimachinery/crd_conversion_webhook.go    | 30 ++----------
 test/e2e/apimachinery/garbage_collector.go    | 16 +-----
 test/e2e/apimachinery/webhook.go              | 30 ++----------
 test/e2e/apps/deployment.go                   | 33 +++----------
 test/e2e/framework/ingress/ingress_utils.go   | 38 ++++----------
 .../monitoring/custom_metrics_deployments.go  | 49 ++++---------------
 test/e2e/network/networking_perf.go           | 42 ++++------------
 test/e2e/network/scale/ingress.go             | 47 ++++++------------
 9 files changed, 64 insertions(+), 251 deletions(-)

diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go
index a5afd256631..6616a45cd8d 100644
--- a/test/e2e/apimachinery/aggregator.go
+++ b/test/e2e/apimachinery/aggregator.go
@@ -202,7 +202,6 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 	etcdImage := imageutils.GetE2EImage(imageutils.Etcd)
 	podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
 	replicas := int32(1)
-	zero := int64(0)
 	etcdLocalhostAddress := "127.0.0.1"
 	if framework.TestContext.ClusterIsIPv6() {
 		etcdLocalhostAddress = "::1"
@@ -250,31 +249,10 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 			},
 		},
 	}
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: deploymentName,
-			Labels: podLabels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				Spec: v1.PodSpec{
-					TerminationGracePeriodSeconds: &zero,
-					Containers: containers,
-					Volumes: volumes,
-				},
-			},
-		},
-	}
+	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers = containers
+	d.Spec.Template.Spec.Volumes = volumes
+
 	deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)

diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go
index a45e369f307..4898c8375a6 100644
--- a/test/e2e/apimachinery/crd_conversion_webhook.go
+++ b/test/e2e/apimachinery/crd_conversion_webhook.go
@@ -266,7 +266,6 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 	// Create the deployment of the webhook
 	podLabels := map[string]string{"app": "sample-crd-conversion-webhook", "crd-webhook": "true"}
 	replicas := int32(1)
-	zero := int64(0)
 	mounts := []v1.VolumeMount{
 		{
 			Name: "crd-conversion-webhook-certs",
@@ -311,31 +310,10 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 			Ports: []v1.ContainerPort{{ContainerPort: containerPort}},
 		},
 	}
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: deploymentCRDName,
-			Labels: podLabels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Strategy: 
appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: podLabels, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &zero, - Containers: containers, - Volumes: volumes, - }, - }, - }, - } + d := e2edeployment.NewDeployment(deploymentCRDName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType) + d.Spec.Template.Spec.Containers = containers + d.Spec.Template.Spec.Volumes = volumes + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 4bfe7ba1864..e337a46d60a 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -39,6 +39,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -126,20 +127,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec { } func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment { - replicas := int32(2) - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{MatchLabels: labels}, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpec(labels), - }, - } + return e2edeployment.NewDeployment(deploymentName, 2, labels, "nginx", imageutils.GetE2EImage(imageutils.Nginx), appsv1.RollingUpdateDeploymentStrategyType) } func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController { diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 4f73c71b73b..360993e3889 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -767,7 +767,6 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert // Create the deployment of the webhook podLabels := map[string]string{"app": "sample-webhook", "webhook": "true"} replicas := int32(1) - zero := int64(0) mounts := []v1.VolumeMount{ { Name: "webhook-certs", @@ -812,31 +811,10 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert Ports: []v1.ContainerPort{{ContainerPort: containerPort}}, }, } - d := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Labels: podLabels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: podLabels, - }, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: podLabels, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &zero, - Containers: containers, - Volumes: volumes, - }, - }, - }, - } + d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType) + 
d.Spec.Template.Spec.Containers = containers + d.Spec.Template.Spec.Volumes = volumes + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 3f9082bf07d..95e7aea7db6 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -192,9 +192,6 @@ var _ = SIGDescribe("Deployment", func() { testDeploymentNoReplicas := int32(0) testDeploymentLabels := map[string]string{"test-deployment-static": "true"} testDeploymentLabelsFlat := "test-deployment-static=true" - testDeploymentLabelSelectors := metav1.LabelSelector{ - MatchLabels: testDeploymentLabels, - } w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = testDeploymentLabelsFlat @@ -205,29 +202,13 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to list Deployments") ginkgo.By("creating a Deployment") - testDeployment := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: testDeploymentName, - Labels: map[string]string{"test-deployment-static": "true"}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &testDeploymentDefaultReplicas, - Selector: &testDeploymentLabelSelectors, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: testDeploymentLabelSelectors.MatchLabels, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &one, - Containers: []v1.Container{{ - Name: testDeploymentName, - Image: testDeploymentInitialImage, - }}, - }, - }, - }, - } - _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), &testDeployment, metav1.CreateOptions{}) + testDeployment := e2edeployment.NewDeployment( + testDeploymentName, testDeploymentDefaultReplicas, testDeploymentLabels, + testDeploymentName, testDeploymentInitialImage, appsv1.RollingUpdateDeploymentStrategyType) + testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"} + testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one + + _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName) ginkgo.By("waiting for Deployment to be created") diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 50673171e43..c0aff34161d 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -57,6 +57,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/test/e2e/framework" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" testutils "k8s.io/kubernetes/test/utils" @@ -1101,35 +1102,14 @@ func generateBacksideHTTPSServiceSpec() *v1.Service { } func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment { - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "echoheaders-https", - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{ - "app": "echoheaders-https", - }}, - Template: 
v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": "echoheaders-https", - }, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "echoheaders-https", - Image: imageutils.GetE2EImage(imageutils.EchoServer), - Ports: []v1.ContainerPort{{ - ContainerPort: 8443, - Name: "echo-443", - }}, - }, - }, - }, - }, - }, - } + labels := map[string]string{"app": "echoheaders-https"} + d := e2edeployment.NewDeployment("echoheaders-https", 0, labels, "echoheaders-https", imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType) + d.Spec.Replicas = nil + d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ + ContainerPort: 8443, + Name: "echo-443", + }} + return d } // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 4079a90e742..78ece15e3a9 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -26,6 +26,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" imageutils "k8s.io/kubernetes/test/utils/image" gcm "google.golang.org/api/monitoring/v3" @@ -104,26 +105,10 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue)) } - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"name": name}, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "name": name, - }, - }, - Spec: podSpec, - }, - Replicas: &replicas, - }, - } + d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType) + d.ObjectMeta.Namespace = namespace + d.Spec.Template.Spec = podSpec + return d } // StackdriverExporterPod is a Pod of simple application that exports a metric of fixed value to @@ -188,26 +173,10 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName // one exposing a metric in prometheus format and second a prometheus-to-sd container // that scrapes the metric and pushes it to stackdriver. 
func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *appsv1.Deployment { - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"name": name}, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "name": name, - }, - }, - Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080), - }, - Replicas: &replicas, - }, - } + d := e2edeployment.NewDeployment(name, replicas, map[string]string{"name": name}, "", "", appsv1.RollingUpdateDeploymentStrategyType) + d.ObjectMeta.Namespace = namespace + d.Spec.Template.Spec = prometheusExporterPodSpec(CustomMetricName, metricValue, 8080) + return d } func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) v1.PodSpec { diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index d66de56871c..136888bd83b 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -68,38 +68,16 @@ func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6 if isIPV6 { args = append(args, "-V") } - deploymentSpec := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "iperf2-server-deployment", - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &one, - Containers: []v1.Container{ - { - Name: "iperf2-server", - Image: imageutils.GetE2EImage(imageutils.Agnhost), - Command: []string{"iperf"}, - Args: args, - Ports: []v1.ContainerPort{ - { - ContainerPort: iperf2Port, - Protocol: v1.ProtocolTCP, - }, - }, - }, - }, - }, - }, + deploymentSpec := e2edeployment.NewDeployment( + "iperf2-server-deployment", replicas, labels, "iperf2-server", + imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RollingUpdateDeploymentStrategyType) + deploymentSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &one + deploymentSpec.Spec.Template.Spec.Containers[0].Command = []string{"iperf"} + deploymentSpec.Spec.Template.Spec.Containers[0].Args = args + deploymentSpec.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{ + { + ContainerPort: iperf2Port, + Protocol: v1.ProtocolTCP, }, } diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 3b814a4ffeb..6f75650297c 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -32,6 +32,7 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/kubernetes/test/e2e/framework" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress" "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) @@ -445,39 +446,21 @@ func generateScaleTestServiceSpec(suffix string) *v1.Service { } func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deployment { - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: scaleTestBackendName, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &numReplicas, - Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: scaleTestLabels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - 
Name: scaleTestBackendName,
-							Image: imageutils.GetE2EImage(imageutils.EchoServer),
-							Ports: []v1.ContainerPort{{ContainerPort: 8080}},
-							ReadinessProbe: &v1.Probe{
-								ProbeHandler: v1.ProbeHandler{
-									HTTPGet: &v1.HTTPGetAction{
-										Port: intstr.FromInt(8080),
-										Path: "/healthz",
-									},
-								},
-								FailureThreshold: 10,
-								PeriodSeconds:    1,
-								SuccessThreshold: 1,
-								TimeoutSeconds:   1,
-							},
-						},
-					},
-				},
+	d := e2edeployment.NewDeployment(
+		scaleTestBackendName, numReplicas, scaleTestLabels, scaleTestBackendName,
+		imageutils.GetE2EImage(imageutils.EchoServer), appsv1.RollingUpdateDeploymentStrategyType)
+	d.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 8080}}
+	d.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
+		ProbeHandler: v1.ProbeHandler{
+			HTTPGet: &v1.HTTPGetAction{
+				Port: intstr.FromInt(8080),
+				Path: "/healthz",
 			},
 		},
+		FailureThreshold: 10,
+		PeriodSeconds:    1,
+		SuccessThreshold: 1,
+		TimeoutSeconds:   1,
 	}
+	return d
 }

From 07bf08690c599c1cd6fe17f82c02d6757fadeadf Mon Sep 17 00:00:00 2001
From: jyz0309 <45495947@qq.com>
Date: Sat, 23 Oct 2021 23:23:47 +0800
Subject: [PATCH 2/4] migrate logging to structured logging

Migrate the volumebinding plugin's log calls from klog.Infof/Errorf to
structured logging (klog.InfoS/ErrorS with klog.KObj and klog.KRef),
and pass the pod object instead of podName where it is only needed for
logging.

Signed-off-by: jyz0309 <45495947@qq.com>
---
 .../framework/plugins/volumebinding/binder.go | 88 +++++++++----------
 .../plugins/volumebinding/binder_test.go      |  8 +-
 2 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go
index ca931b6e4cf..883a2c143dd 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -260,10 +260,9 @@ func NewVolumeBinder(
 // returned.
 func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	podVolumes = &PodVolumes{}
-	podName := getPodName(pod)

 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
-	klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
+	klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))

 	// Initialize to true for pods that don't have volumes. These
 	// booleans get translated into reason strings when the function
@@ -315,7 +314,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*

 	// Check PV node affinity on bound volumes
 	if len(boundClaims) > 0 {
-		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, podName)
+		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
 		if err != nil {
 			return
 		}
@@ -371,9 +370,7 @@
 // 2. Update the pvcCache with the new PVCs with annotations set
 // 3. 
Update PodVolumes again with cached API updates for PVs and PVCs. func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) { - podName := getPodName(assumedPod) - - klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) + klog.V(4).InfoS("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) defer func() { if err != nil { metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() @@ -381,7 +378,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod }() if allBound := b.arePodVolumesBound(assumedPod); allBound { - klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) + klog.V(4).InfoS("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) return true, nil } @@ -389,14 +386,15 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod newBindings := []*BindingInfo{} for _, binding := range podVolumes.StaticBindings { newPV, dirty, err := pvutil.GetBindVolumeToClaim(binding.pv, binding.pvc) - klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", - podName, - binding.pv.Name, - binding.pvc.Name, - newPV, - dirty, - err) + klog.V(5).InfoS("AssumePodVolumes: GetBindVolumeToClaim", + "pod", klog.KObj(assumedPod), + "PV", klog.KObj(binding.pv), + "PVC", klog.KObj(binding.pvc), + "newPV", klog.KObj(newPV), + "dirty", dirty, + ) if err != nil { + klog.ErrorS(err, "AssumePodVolumes: fail to GetBindVolumeToClaim") b.revertAssumedPVs(newBindings) return false, err } @@ -443,8 +441,7 @@ func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) { // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound // by the PV controller. func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) { - podName := getPodName(assumedPod) - klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) + klog.V(4).InfoS("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName)) defer func() { if err != nil { @@ -456,7 +453,7 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes claimsToProvision := podVolumes.DynamicProvisions // Start API operations - err = b.bindAPIUpdate(podName, bindings, claimsToProvision) + err = b.bindAPIUpdate(assumedPod, bindings, claimsToProvision) if err != nil { return err } @@ -480,7 +477,8 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string { } // bindAPIUpdate makes the API update for those PVs/PVCs. -func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error { +func (b *volumeBinder) bindAPIUpdate(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error { + podName := getPodName(pod) if bindings == nil { return fmt.Errorf("failed to get cached bindings for pod %q", podName) } @@ -510,16 +508,15 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl // Do the actual prebinding. 
Let the PV controller take care of the rest // There is no API rollback if the actual binding fails for _, binding = range bindings { - klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) + klog.V(5).InfoS("bindAPIUpdate: binding PV to PVC", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) // TODO: does it hurt if we make an api call and nothing needs to be updated? - claimKey := getPVCName(binding.pvc) - klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name) + klog.V(2).InfoS("Claim bound to volume", "PVC", klog.KObj(binding.pvc), "PV", klog.KObj(binding.pv)) newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err) + klog.V(4).InfoS("Updating PersistentVolume: binding to claim failed", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err) return err } - klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey) + klog.V(4).InfoS("Updating PersistentVolume: bound to claim", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) // Save updated object from apiserver for later checking. binding.pv = newPV lastProcessedBinding++ @@ -528,7 +525,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*BindingInfo, cl // Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest // PV controller is expected to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { - klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) + klog.V(5).InfoS("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim)) newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{}) if err != nil { return err @@ -572,7 +569,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim csiNode, err := b.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default - klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err) + klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } // Check for any conditions that might require scheduling retry @@ -584,7 +581,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim if apierrors.IsNotFound(err) { return false, fmt.Errorf("pod does not exist any more: %w", err) } - klog.Errorf("failed to get pod %s/%s from the lister: %v", pod.Namespace, pod.Name, err) + klog.ErrorS(err, "Failed to get pod from the lister", "pod", klog.KObj(pod)) } for _, binding := range bindings { @@ -680,7 +677,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim } // All pvs and pvcs that we operated on are bound - klog.V(4).Infof("All PVCs for pod %q are bound", podName) + klog.V(4).InfoS("All PVCs for pod are bound", "pod", klog.KObj(pod)) return true, nil } @@ -728,12 +725,12 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste fullyBound := b.isPVCFullyBound(pvc) if fullyBound { - klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvc.Spec.VolumeName) + klog.V(5).InfoS("PVC is 
fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) } else { if pvc.Spec.VolumeName != "" { - klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvc.Spec.VolumeName) + klog.V(5).InfoS("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) } else { - klog.V(5).Infof("PVC %q is not bound", pvcKey) + klog.V(5).InfoS("PVC is not bound", "PVC", klog.KObj(pvc)) } } return fullyBound, pvc, nil @@ -790,11 +787,11 @@ func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil } -func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, bool, error) { +func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) { csiNode, err := b.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default - klog.V(4).Infof("Could not get a CSINode object for the node %q: %v", node.Name, err) + klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } for _, pvc := range claims { @@ -814,20 +811,19 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { - klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) + klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err) return false, true, nil } - klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) + klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod)) } - klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) + klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node)) return true, true, nil } // findMatchingVolumes tries to find matching volumes for given claims, // and return unbound claims for further provision. 
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { - podName := getPodName(pod) // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) @@ -839,7 +835,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // Get storage class name from each PVC storageClassName := storagehelpers.GetPersistentVolumeClaimClass(pvc) allPVs := b.pvCache.ListPVs(storageClassName) - pvcName := getPVCName(pvc) // Find a matching PV pv, err := pvutil.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true) @@ -847,7 +842,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi return false, nil, nil, err } if pv == nil { - klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name) + klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node)) unboundClaims = append(unboundClaims, pvc) foundMatches = false continue @@ -856,11 +851,11 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // matching PV needs to be excluded so we don't select it again chosenPVs[pv.Name] = pv bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc}) - klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName) + klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod)) } if foundMatches { - klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) + klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node)) } return @@ -870,7 +865,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // findMatchingVolumes, and do not have matching volumes for binding), and return true // if all of the claims are eligible for dynamic provision. 
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) { - podName := getPodName(pod) dynamicProvisions = []*v1.PersistentVolumeClaim{} // We return early with provisionedClaims == nil if a check @@ -888,13 +882,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v } provisioner := class.Provisioner if provisioner == "" || provisioner == pvutil.NotSupportedProvisioner { - klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName) + klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim)) return false, true, nil, nil } // Check if the node can satisfy the topology requirement in the class if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { - klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName) + klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim)) return false, true, nil, nil } @@ -911,7 +905,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v dynamicProvisions = append(dynamicProvisions, claim) } - klog.V(4).Infof("Provisioning for %d claims of pod %q that has no matching volumes on node %q ...", len(claimsToProvision), podName, node.Name) + klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node)) return true, true, dynamicProvisions, nil } @@ -977,8 +971,8 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten // TODO (?): this doesn't give any information about which pools where considered and why // they had to be rejected. Log that above? But that might be a lot of log output... - klog.V(4).Infof("Node %q has no accessible CSIStorageCapacity with enough capacity for PVC %s/%s of size %d and storage class %q", - node.Name, claim.Namespace, claim.Name, sizeInBytes, storageClass.Name) + klog.V(4).InfoS("Node has no accessible CSIStorageCapacity with enough capacity for PVC", + "node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass)) return false, nil } @@ -1000,7 +994,7 @@ func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1beta1.CSI selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology) if err != nil { // This should never happen because NodeTopology must be valid. 
-		klog.Errorf("unexpected error converting %+v to a label selector: %v", capacity.NodeTopology, err)
+		klog.ErrorS(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology)
 		return false
 	}
 	return selector.Matches(labels.Set(node.Labels))
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
index 54d888e5ae2..b2cdf6edb8d 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
@@ -19,6 +19,7 @@ package volumebinding
 import (
 	"context"
 	"fmt"
+	"os"
 	"reflect"
 	"sort"
 	"testing"
@@ -187,7 +188,8 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}, csiStorageCapacity ...b
 	informerFactory.Start(stopCh)
 	for v, synced := range informerFactory.WaitForCacheSync(stopCh) {
 		if !synced {
-			klog.Fatalf("Error syncing informer for %v", v)
+			klog.ErrorS(nil, "Error syncing informer", "informer", v)
+			os.Exit(1)
 		}
 	}
@@ -1546,7 +1548,7 @@ func TestBindAPIUpdate(t *testing.T) {
 			testEnv.assumeVolumes(t, "node1", pod, scenario.bindings, scenario.provisionedPVCs)

 			// Execute
-			err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
+			err := testEnv.internalBinder.bindAPIUpdate(pod, scenario.bindings, scenario.provisionedPVCs)

 			// Validate
 			if !scenario.shouldFail && err != nil {
@@ -2087,7 +2089,7 @@ func TestBindPodVolumes(t *testing.T) {
 			go func(scenario scenarioType) {
 				time.Sleep(5 * time.Second)
 				// Sleep a while to run after bindAPIUpdate in BindPodVolumes
-				klog.V(5).Infof("Running delay function")
+				klog.V(5).InfoS("Running delay function")
 				scenario.delayFunc(t, testEnv, pod, scenario.initPVs, scenario.initPVCs)
 			}(scenario)
 		}

From 6bac4dcaf71b19b5f70e926601846dc269b2fa48 Mon Sep 17 00:00:00 2001
From: Paco Xu
Date: Tue, 2 Nov 2021 09:56:21 +0800
Subject: [PATCH 3/4] update etcd makefile to use 3.5.1 for building

Signed-off-by: Paco Xu
---
 build/dependencies.yaml                | 2 +-
 cluster/images/etcd/Makefile           | 8 ++++----
 cluster/images/etcd/migrate/options.go | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/build/dependencies.yaml b/build/dependencies.yaml
index c5cbcd2099f..a356751bdad 100644
--- a/build/dependencies.yaml
+++ b/build/dependencies.yaml
@@ -77,7 +77,7 @@ dependencies:
       match: const etcdImage

   - name: "etcd-image"
-    version: 3.5.0
+    version: 3.5.1
     refPaths:
     - path: cluster/images/etcd/Makefile
       match: BUNDLED_ETCD_VERSIONS\?|LATEST_ETCD_VERSION\?
diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile
index 41eb5970ad6..4e96cbbbb87 100644
--- a/cluster/images/etcd/Makefile
+++ b/cluster/images/etcd/Makefile
@@ -15,7 +15,7 @@
 # Build the etcd image
 #
 # Usage:
-# 	[BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
+# 	[BUNDLED_ETCD_VERSIONS=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push)
 #
 # The image contains different etcd versions to simplify
 # upgrades. Thus be careful when removing any versions from here.
 #
@@ -26,15 +26,15 @@
 # Except from etcd-$(version) and etcdctl-$(version) binaries, we also
 # need etcd and etcdctl binaries for backward compatibility reasons.
 # That binary will be set to the last version from $(BUNDLED_ETCD_VERSIONS). 
-BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.0
+BUNDLED_ETCD_VERSIONS?=3.0.17 3.1.12 3.2.24 3.3.17 3.4.13 3.5.1

 # LATEST_ETCD_VERSION identifies the most recent etcd version available.
-LATEST_ETCD_VERSION?=3.5.0
+LATEST_ETCD_VERSION?=3.5.1

 # REVISION provides a version number for this image and all it's bundled
 # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment
 # for each revision of this image at that etcd version.
-REVISION?=4
+REVISION?=0

 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "<LATEST_ETCD_VERSION>-<REVISION>".
 IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION)
diff --git a/cluster/images/etcd/migrate/options.go b/cluster/images/etcd/migrate/options.go
index 3af751ceadc..f7fdbb844fe 100644
--- a/cluster/images/etcd/migrate/options.go
+++ b/cluster/images/etcd/migrate/options.go
@@ -28,7 +28,7 @@ import (
 )

 var (
-	supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.0"}
+	supportedEtcdVersions = []string{"3.0.17", "3.1.12", "3.2.24", "3.3.17", "3.4.13", "3.5.1"}
 )

 const (

From 808c8f42d55c520e45eab93200768607cefc1451 Mon Sep 17 00:00:00 2001
From: Konstantin Misyutin
Date: Fri, 10 Sep 2021 18:15:10 +0800
Subject: [PATCH 4/4] Remove StorageObjectInUseProtection feature gate logic

This feature has graduated to GA in v1.11 and will always be enabled,
so there is no longer any need to check whether it is enabled.

Signed-off-by: Konstantin Misyutin
---
 cmd/kube-controller-manager/app/core.go       |  2 -
 .../volume/persistentvolume/index_test.go     | 49 +++++------
 .../volume/persistentvolume/pv_controller.go  |  8 +-
 .../volume/persistentvolume/util/util.go      |  8 +-
 .../pvc_protection_controller.go              | 14 +---
 .../pvc_protection_controller_test.go         | 76 ++++++------------
 .../pvprotection/pv_protection_controller.go  | 16 +---
 .../pv_protection_controller_test.go          | 43 +++--------
 .../desired_state_of_world_populator.go       | 22 +++---
 .../storageobjectinuseprotection/admission.go | 18 -----
 .../admission_test.go                         | 22 ------
 11 files changed, 72 insertions(+), 206 deletions(-)

diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go
index 010c2942554..acaea316867 100644
--- a/cmd/kube-controller-manager/app/core.go
+++ b/cmd/kube-controller-manager/app/core.go
@@ -552,7 +552,6 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
 		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
-		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
 	)
 	if err != nil {
 		return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
@@ -565,7 +564,6 @@ func startPVProtectionController(ctx context.Context, controllerContext Controll
 	go pvprotection.NewPVProtectionController(
 		controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
 		controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
-		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
 	).Run(ctx, 1)
 	return nil, true, nil
 }
diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go
index 3098c6905a6..cc0af4cf61d 100644
--- a/pkg/controller/volume/persistentvolume/index_test.go
+++ b/pkg/controller/volume/persistentvolume/index_test.go
@@ -23,12 +23,9 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes/scheme" ref "k8s.io/client-go/tools/reference" - featuregatetesting "k8s.io/component-base/featuregate/testing" pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume/util" ) @@ -1226,29 +1223,24 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) { } satisfyingTestCases := map[string]struct { - isExpectedMatch bool - vol *v1.PersistentVolume - pvc *v1.PersistentVolumeClaim - enableStorageObjectInUseProtection bool + isExpectedMatch bool + vol *v1.PersistentVolume + pvc *v1.PersistentVolumeClaim }{ "pv deletionTimeStamp not set": { - isExpectedMatch: true, - vol: pv, - pvc: pvc, - enableStorageObjectInUseProtection: true, + isExpectedMatch: true, + vol: pv, + pvc: pvc, }, "pv deletionTimeStamp set": { - isExpectedMatch: false, - vol: pvToDelete, - pvc: pvc, - enableStorageObjectInUseProtection: true, + isExpectedMatch: false, + vol: pvToDelete, + pvc: pvc, }, } for name, testCase := range satisfyingTestCases { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)() - err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc) // expected to match but got an error if err != nil && testCase.isExpectedMatch { @@ -1262,28 +1254,23 @@ func TestStorageObjectInUseProtectionFiltering(t *testing.T) { } filteringTestCases := map[string]struct { - isExpectedMatch bool - vol persistentVolumeOrderedIndex - pvc *v1.PersistentVolumeClaim - enableStorageObjectInUseProtection bool + isExpectedMatch bool + vol persistentVolumeOrderedIndex + pvc *v1.PersistentVolumeClaim }{ "pv deletionTimeStamp not set": { - isExpectedMatch: true, - vol: createTestVolOrderedIndex(pv), - pvc: pvc, - enableStorageObjectInUseProtection: true, + isExpectedMatch: true, + vol: createTestVolOrderedIndex(pv), + pvc: pvc, }, "pv deletionTimeStamp set": { - isExpectedMatch: false, - vol: createTestVolOrderedIndex(pvToDelete), - pvc: pvc, - enableStorageObjectInUseProtection: true, + isExpectedMatch: false, + vol: createTestVolOrderedIndex(pvToDelete), + pvc: pvc, }, } for name, testCase := range filteringTestCases { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)() - pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false) // expected to match but either got an error or no returned pvmatch if pvmatch == nil && testCase.isExpectedMatch { diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 2bdb9c1d485..caaea704e43 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" corelisters "k8s.io/client-go/listers/core/v1" @@ -45,7 +44,6 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/events" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics" pvutil 
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util" - "k8s.io/kubernetes/pkg/features" proxyutil "k8s.io/kubernetes/pkg/proxy/util" "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" @@ -275,10 +273,8 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo requestedSize := requestedQty.Value() // check if PV's DeletionTimeStamp is set, if so, return error. - if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) { - if volume.ObjectMeta.DeletionTimestamp != nil { - return fmt.Errorf("the volume is marked for deletion %q", volume.Name) - } + if volume.ObjectMeta.DeletionTimestamp != nil { + return fmt.Errorf("the volume is marked for deletion %q", volume.Name) } volumeQty := volume.Spec.Capacity[v1.ResourceStorage] diff --git a/pkg/controller/volume/persistentvolume/util/util.go b/pkg/controller/volume/persistentvolume/util/util.go index bb314134974..df532d550c4 100644 --- a/pkg/controller/volume/persistentvolume/util/util.go +++ b/pkg/controller/volume/persistentvolume/util/util.go @@ -25,12 +25,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes/scheme" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/tools/reference" storagehelpers "k8s.io/component-helpers/storage/volume" - "k8s.io/kubernetes/pkg/features" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -227,10 +225,8 @@ func FindMatchingVolume( } // check if PV's DeletionTimeStamp is set, if so, skip this volume. - if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) { - if volume.ObjectMeta.DeletionTimestamp != nil { - continue - } + if volume.ObjectMeta.DeletionTimestamp != nil { + continue } nodeAffinityValid := true diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index aa6f4431165..1434f169364 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -53,17 +53,13 @@ type Controller struct { podIndexer cache.Indexer queue workqueue.RateLimitingInterface - - // allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing - storageObjectInUseProtectionEnabled bool } // NewPVCProtectionController returns a new instance of PVCProtectionController. 
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) (*Controller, error) { +func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) { e := &Controller{ - client: cl, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), - storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled, + client: cl, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), } if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter()) @@ -189,10 +185,6 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin } func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeClaim) error { - // Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled - if !c.storageObjectInUseProtectionEnabled { - return nil - } claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go index 0246e384991..a0b25061f73 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go @@ -185,31 +185,22 @@ func TestPVCProtectionController(t *testing.T) { deletedPod *v1.Pod // List of expected kubeclient actions that should happen during the // test. 
- expectedActions []clienttesting.Action - storageObjectInUseProtectionEnabled bool + expectedActions []clienttesting.Action }{ // // PVC events // { - name: "StorageObjectInUseProtection Enabled, PVC without finalizer -> finalizer is added", + name: "PVC without finalizer -> finalizer is added", updatedPVC: pvc(), expectedActions: []clienttesting.Action{ clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { - name: "StorageObjectInUseProtection Disabled, PVC without finalizer -> finalizer is not added", - updatedPVC: pvc(), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: false, - }, - { - name: "PVC with finalizer -> no action", - updatedPVC: withProtectionFinalizer(pvc()), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + name: "PVC with finalizer -> no action", + updatedPVC: withProtectionFinalizer(pvc()), + expectedActions: []clienttesting.Action{}, }, { name: "saving PVC finalizer fails -> controller retries", @@ -229,25 +220,14 @@ func TestPVCProtectionController(t *testing.T) { // This succeeds clienttesting.NewUpdateAction(pvcGVR, defaultNS, withProtectionFinalizer(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { - name: "StorageObjectInUseProtection Enabled, deleted PVC with finalizer -> finalizer is removed", + name: "deleted PVC with finalizer -> finalizer is removed", updatedPVC: deleted(withProtectionFinalizer(pvc())), expectedActions: []clienttesting.Action{ clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, - }, - { - name: "StorageObjectInUseProtection Disabled, deleted PVC with finalizer -> finalizer is removed", - updatedPVC: deleted(withProtectionFinalizer(pvc())), - expectedActions: []clienttesting.Action{ - clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), - clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), - }, - storageObjectInUseProtectionEnabled: false, }, { name: "finalizer removal fails -> controller retries", @@ -270,7 +250,6 @@ func TestPVCProtectionController(t *testing.T) { // Succeeds clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { name: "deleted PVC with finalizer + pod with the PVC exists -> finalizer is not removed", @@ -290,16 +269,14 @@ func TestPVCProtectionController(t *testing.T) { clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { name: "deleted PVC with finalizer + pod with the PVC finished but is not deleted -> finalizer is not removed", initialObjects: []runtime.Object{ withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())), }, - updatedPVC: deleted(withProtectionFinalizer(pvc())), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + updatedPVC: deleted(withProtectionFinalizer(pvc())), + expectedActions: []clienttesting.Action{}, }, { name: "deleted PVC with finalizer + pod with the PVC exists but is not in the Informer's cache yet -> finalizer is not removed", @@ -311,7 +288,6 @@ func TestPVCProtectionController(t *testing.T) { expectedActions: []clienttesting.Action{ clienttesting.NewListAction(podGVR, podGVK, defaultNS, 
metav1.ListOptions{}), }, - storageObjectInUseProtectionEnabled: true, }, // // Pod events @@ -321,18 +297,16 @@ func TestPVCProtectionController(t *testing.T) { initialObjects: []runtime.Object{ deleted(withProtectionFinalizer(pvc())), }, - updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())), + expectedActions: []clienttesting.Action{}, }, { name: "updated finished Pod -> finalizer is not removed", initialObjects: []runtime.Object{ deleted(withProtectionFinalizer(pvc())), }, - updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())), + expectedActions: []clienttesting.Action{}, }, { name: "updated unscheduled Pod -> finalizer is removed", @@ -344,7 +318,6 @@ func TestPVCProtectionController(t *testing.T) { clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { name: "deleted running Pod -> finalizer is removed", @@ -356,7 +329,6 @@ func TestPVCProtectionController(t *testing.T) { clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { name: "pod delete and create with same namespaced name seen as an update, old pod used deleted PVC -> finalizer is removed", @@ -369,37 +341,33 @@ func TestPVCProtectionController(t *testing.T) { clienttesting.NewListAction(podGVR, podGVK, defaultNS, metav1.ListOptions{}), clienttesting.NewUpdateAction(pvcGVR, defaultNS, deleted(pvc())), }, - storageObjectInUseProtectionEnabled: true, }, { name: "pod delete and create with same namespaced name seen as an update, old pod used non-deleted PVC -> finalizer is not removed", initialObjects: []runtime.Object{ withProtectionFinalizer(pvc()), }, - deletedPod: withPVC(defaultPVCName, pod()), - updatedPod: withUID("uid2", pod()), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + deletedPod: withPVC(defaultPVCName, pod()), + updatedPod: withUID("uid2", pod()), + expectedActions: []clienttesting.Action{}, }, { name: "pod delete and create with same namespaced name seen as an update, both pods reference deleted PVC -> finalizer is not removed", initialObjects: []runtime.Object{ deleted(withProtectionFinalizer(pvc())), }, - deletedPod: withPVC(defaultPVCName, pod()), - updatedPod: withUID("uid2", withPVC(defaultPVCName, pod())), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + deletedPod: withPVC(defaultPVCName, pod()), + updatedPod: withUID("uid2", withPVC(defaultPVCName, pod())), + expectedActions: []clienttesting.Action{}, }, { name: "pod update from unscheduled to scheduled, deleted PVC is referenced -> finalizer is not removed", initialObjects: []runtime.Object{ deleted(withProtectionFinalizer(pvc())), }, - deletedPod: unscheduled(withPVC(defaultPVCName, pod())), - updatedPod: withPVC(defaultPVCName, pod()), - expectedActions: []clienttesting.Action{}, - storageObjectInUseProtectionEnabled: true, + deletedPod: unscheduled(withPVC(defaultPVCName, pod())), + updatedPod: withPVC(defaultPVCName, pod()), + 
expectedActions: []clienttesting.Action{}, }, } @@ -431,7 +399,7 @@ func TestPVCProtectionController(t *testing.T) { podInformer := informers.Core().V1().Pods() // Create the controller - ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled) + ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index 0bf3650f56c..86c73cd5c3d 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -47,17 +47,13 @@ type Controller struct { pvListerSynced cache.InformerSynced queue workqueue.RateLimitingInterface - - // allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing - storageObjectInUseProtectionEnabled bool } // NewPVProtectionController returns a new *Controller. -func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller { +func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller { e := &Controller{ - client: cl, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"), - storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled, + client: cl, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"), } if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolume_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter()) @@ -158,10 +154,6 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error { } func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) error { - // Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled - if !c.storageObjectInUseProtectionEnabled { - return nil - } pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{}) diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go index ea522ffee61..b0645781820 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go @@ -25,7 +25,7 @@ import ( "github.com/davecgh/go-spew/spew" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -112,30 +112,21 @@ func TestPVProtectionController(t *testing.T) { updatedPV *v1.PersistentVolume // List of expected kubeclient actions that should happen during the // test. 
diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
index ea522ffee61..b0645781820 100644
--- a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
+++ b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
@@ -25,7 +25,7 @@ import (
 
 	"github.com/davecgh/go-spew/spew"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -112,30 +112,21 @@ func TestPVProtectionController(t *testing.T) {
 		updatedPV *v1.PersistentVolume
 		// List of expected kubeclient actions that should happen during the
 		// test.
-		expectedActions                     []clienttesting.Action
-		storageObjectInUseProtectionEnabled bool
+		expectedActions []clienttesting.Action
 	}{
 		// PV events
 		//
 		{
-			name:      "StorageObjectInUseProtection Enabled, PV without finalizer -> finalizer is added",
+			name:      "PV without finalizer -> finalizer is added",
 			updatedPV: pv(),
 			expectedActions: []clienttesting.Action{
 				clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
 			},
-			storageObjectInUseProtectionEnabled: true,
 		},
 		{
-			name:                                "StorageObjectInUseProtection Disabled, PV without finalizer -> finalizer is added",
-			updatedPV:                           pv(),
-			expectedActions:                     []clienttesting.Action{},
-			storageObjectInUseProtectionEnabled: false,
-		},
-		{
-			name:                                "PVC with finalizer -> no action",
-			updatedPV:                           withProtectionFinalizer(pv()),
-			expectedActions:                     []clienttesting.Action{},
-			storageObjectInUseProtectionEnabled: true,
+			name:            "PVC with finalizer -> no action",
+			updatedPV:       withProtectionFinalizer(pv()),
+			expectedActions: []clienttesting.Action{},
 		},
 		{
 			name: "saving PVC finalizer fails -> controller retries",
@@ -155,23 +146,13 @@ func TestPVProtectionController(t *testing.T) {
 				// This succeeds
 				clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
 			},
-			storageObjectInUseProtectionEnabled: true,
 		},
 		{
-			name:      "StorageObjectInUseProtection Enabled, deleted PV with finalizer -> finalizer is removed",
+			name:      "deleted PV with finalizer -> finalizer is removed",
 			updatedPV: deleted(withProtectionFinalizer(pv())),
 			expectedActions: []clienttesting.Action{
 				clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
 			},
-			storageObjectInUseProtectionEnabled: true,
-		},
-		{
-			name:      "StorageObjectInUseProtection Disabled, deleted PV with finalizer -> finalizer is removed",
-			updatedPV: deleted(withProtectionFinalizer(pv())),
-			expectedActions: []clienttesting.Action{
-				clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
-			},
-			storageObjectInUseProtectionEnabled: false,
 		},
 		{
 			name: "finalizer removal fails -> controller retries",
@@ -191,13 +172,11 @@ func TestPVProtectionController(t *testing.T) {
 				// Succeeds
 				clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
 			},
-			storageObjectInUseProtectionEnabled: true,
 		},
 		{
-			name:                                "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
-			updatedPV:                           deleted(withProtectionFinalizer(boundPV())),
-			expectedActions:                     []clienttesting.Action{},
-			storageObjectInUseProtectionEnabled: true,
+			name:            "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
+			updatedPV:       deleted(withProtectionFinalizer(boundPV())),
+			expectedActions: []clienttesting.Action{},
 		},
 	}
 
@@ -231,7 +210,7 @@ func TestPVProtectionController(t *testing.T) {
 		}
 
 		// Create the controller
-		ctrl := NewPVProtectionController(pvInformer, client, test.storageObjectInUseProtectionEnabled)
+		ctrl := NewPVProtectionController(pvInformer, client)
 
 		// Start the test by simulating an event
 		if test.updatedPV != nil {
diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index 27ee7ebcd03..fff3c058605 100644
--- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -570,18 +570,16 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 		return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
-		// Pods that uses a PVC that is being deleted must not be started.
-		//
-		// In case an old kubelet is running without this check or some kubelets
-		// have this feature disabled, the worst that can happen is that such
-		// pod is scheduled. This was the default behavior in 1.8 and earlier
-		// and users should not be that surprised.
-		// It should happen only in very rare case when scheduler schedules
-		// a pod and user deletes a PVC that's used by it at the same time.
-		if pvc.ObjectMeta.DeletionTimestamp != nil {
-			return nil, errors.New("PVC is being deleted")
-		}
+	// Pods that uses a PVC that is being deleted must not be started.
+	//
+	// In case an old kubelet is running without this check or some kubelets
+	// have this feature disabled, the worst that can happen is that such
+	// pod is scheduled. This was the default behavior in 1.8 and earlier
+	// and users should not be that surprised.
+	// It should happen only in very rare case when scheduler schedules
+	// a pod and user deletes a PVC that's used by it at the same time.
+	if pvc.ObjectMeta.DeletionTimestamp != nil {
+		return nil, errors.New("PVC is being deleted")
 	}
 
 	if pvc.Status.Phase != v1.ClaimBound {
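The kubelet now applies the deletion guard unconditionally whenever it resolves a pod's PVC. A standalone sketch of that check; checkPVCUsable is a hypothetical helper name and the phase message is illustrative, while the two conditions mirror the hunk above:

package main

import (
	"errors"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// checkPVCUsable is a hypothetical helper; the kubelet inlines this logic.
func checkPVCUsable(pvc *v1.PersistentVolumeClaim) error {
	// A PVC that is being deleted must not be used by a new pod.
	if pvc.ObjectMeta.DeletionTimestamp != nil {
		return errors.New("PVC is being deleted")
	}
	// Only bound claims are usable.
	if pvc.Status.Phase != v1.ClaimBound {
		return fmt.Errorf("PVC is not bound, phase: %q", pvc.Status.Phase)
	}
	return nil
}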
diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go
index a3c6a1e5c4b..82ed1ff9cc4 100644
--- a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go
+++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go
@@ -21,11 +21,8 @@ import (
 	"io"
 
 	"k8s.io/apiserver/pkg/admission"
-	"k8s.io/apiserver/pkg/admission/initializer"
-	"k8s.io/component-base/featuregate"
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
@@ -45,12 +42,9 @@ func Register(plugins *admission.Plugins) {
 // storageProtectionPlugin holds state for and implements the admission plugin.
 type storageProtectionPlugin struct {
 	*admission.Handler
-
-	storageObjectInUseProtection bool
 }
 
 var _ admission.Interface = &storageProtectionPlugin{}
-var _ initializer.WantsFeatures = &storageProtectionPlugin{}
 
 // newPlugin creates a new admission plugin.
 func newPlugin() *storageProtectionPlugin {
@@ -70,10 +64,6 @@ var (
 // This prevents users from deleting a PVC that's used by a running pod.
 // This also prevents admin from deleting a PV that's bound by a PVC
 func (c *storageProtectionPlugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
-	if !c.storageObjectInUseProtection {
-		return nil
-	}
-
 	switch a.GetResource().GroupResource() {
 	case pvResource:
 		return c.admitPV(a)
@@ -129,11 +119,3 @@ func (c *storageProtectionPlugin) admitPVC(a admission.Attributes) error {
 	pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer)
 	return nil
 }
-
-func (c *storageProtectionPlugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
-	c.storageObjectInUseProtection = featureGates.Enabled(features.StorageObjectInUseProtection)
-}
-
-func (c *storageProtectionPlugin) ValidateInitialization() error {
-	return nil
-}
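Since the plugin no longer implements initializer.WantsFeatures, wiring it up needs no feature-gate plumbing; registering it is enough. A minimal sketch using the admission plugin registry from k8s.io/apiserver, with the Register function taken from the hunk above:

package main

import (
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/kubernetes/plugin/pkg/admission/storage/storageobjectinuseprotection"
)

func main() {
	// Admit now appends the protection finalizers unconditionally,
	// so registration requires no InspectFeatureGates hook.
	plugins := admission.NewPlugins()
	storageobjectinuseprotection.Register(plugins)
}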
diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go
index b48ea070d57..95f6db9a645 100644
--- a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go
+++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission_test.go
@@ -61,7 +61,6 @@ func TestAdmit(t *testing.T) {
 		resource       schema.GroupVersionResource
 		object         runtime.Object
 		expectedObject runtime.Object
-		featureEnabled bool
 		namespace      string
 	}{
 		{
@@ -69,7 +68,6 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
 			claim,
 			claimWithFinalizer,
-			true,
 			claim.Namespace,
 		},
 		{
@@ -77,23 +75,13 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
 			claimWithFinalizer,
 			claimWithFinalizer,
-			true,
 			claimWithFinalizer.Namespace,
 		},
-		{
-			"disabled feature -> no finalizer",
-			api.SchemeGroupVersion.WithResource("persistentvolumeclaims"),
-			claim,
-			claim,
-			false,
-			claim.Namespace,
-		},
 		{
 			"create -> add finalizer",
 			api.SchemeGroupVersion.WithResource("persistentvolumes"),
 			pv,
 			pvWithFinalizer,
-			true,
 			pv.Namespace,
 		},
 		{
@@ -101,23 +89,13 @@ func TestAdmit(t *testing.T) {
 			api.SchemeGroupVersion.WithResource("persistentvolumes"),
 			pvWithFinalizer,
 			pvWithFinalizer,
-			true,
 			pvWithFinalizer.Namespace,
 		},
-		{
-			"disabled feature -> no finalizer",
-			api.SchemeGroupVersion.WithResource("persistentvolumes"),
-			pv,
-			pv,
-			false,
-			pv.Namespace,
-		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			ctrl := newPlugin()
-			ctrl.storageObjectInUseProtection = test.featureEnabled
 
 			obj := test.object.DeepCopyObject()
 			attrs := admission.NewAttributesRecord(