diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go
index 238b7df2a66..f8768eda4f3 100644
--- a/cmd/kube-controller-manager/app/certificates.go
+++ b/cmd/kube-controller-manager/app/certificates.go
@@ -193,7 +193,7 @@ func startCSRCleanerController(ctx ControllerContext) (http.Handler, bool, error
}
func startRootCACertPublisher(ctx ControllerContext) (http.Handler, bool, error) {
- if !utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+ if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
return nil, false, nil
}
diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go
index 538bd9d625a..34fd3127a5d 100644
--- a/pkg/controller/certificates/rootcacertpublisher/publisher.go
+++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go
@@ -200,6 +200,8 @@ func (c *Publisher) syncNamespace(ns string) error {
return nil
}
+ // copy so we don't modify the cache's instance of the configmap
+ cm = cm.DeepCopy()
cm.Data = data
_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index cf75770f360..ab6d4867533 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -643,6 +643,14 @@ const (
// Add support for the HPA to scale based on metrics from individual containers
// in target pods
HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"
+
+ // owner: @zshihang
+ // alpha: v1.13
+ // beta: v1.20
+ //
+ // Allows kube-controller-manager to publish kube-root-ca.crt configmap to
+ // every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
+ RootCAConfigMap featuregate.Feature = "RootCAConfigMap"
)
func init() {
@@ -740,6 +748,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
+ RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
diff --git a/pkg/kubeapiserver/options/authentication.go b/pkg/kubeapiserver/options/authentication.go
index d0ec47b5f3b..910ae817493 100644
--- a/pkg/kubeapiserver/options/authentication.go
+++ b/pkg/kubeapiserver/options/authentication.go
@@ -199,6 +199,9 @@ func (o *BuiltInAuthenticationOptions) Validate() []error {
}
}
if o.ServiceAccounts != nil && utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
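+ // Bound token volumes mount the kube-root-ca.crt configmap published by the
+ // root CA cert publisher, so RootCAConfigMap must also be enabled.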
+ if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
+ allErrors = append(allErrors, errors.New("BoundServiceAccountTokenVolume feature depends on RootCAConfigMap feature, but RootCAConfigMap feature is not enabled"))
+ }
if len(o.ServiceAccounts.Issuer) == 0 {
allErrors = append(allErrors, errors.New("service-account-issuer is a required flag when BoundServiceAccountTokenVolume is enabled"))
}
diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
index f11a0412223..2d326c62767 100644
--- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
+++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
@@ -402,7 +402,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
})
}
- if utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+ if utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "root-ca-cert-publisher"},
Rules: []rbacv1.PolicyRule{
diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml
index 96b2cb3f181..462c21ddf67 100644
--- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml
+++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml
@@ -391,6 +391,23 @@ items:
- kind: ServiceAccount
name: resourcequota-controller
namespace: kube-system
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ creationTimestamp: null
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:controller:root-ca-cert-publisher
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:controller:root-ca-cert-publisher
+ subjects:
+ - kind: ServiceAccount
+ name: root-ca-cert-publisher
+ namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml
index eb01c90a42a..1d61564280c 100644
--- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml
+++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml
@@ -1195,6 +1195,32 @@ items:
- create
- patch
- update
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ creationTimestamp: null
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:controller:root-ca-cert-publisher
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - update
+ - apiGroups:
+ - ""
+ - events.k8s.io
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh
index e3e2d74184f..d0ff0769017 100755
--- a/test/cmd/apply.sh
+++ b/test/cmd/apply.sh
@@ -310,8 +310,8 @@ __EOF__
kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
## kubectl apply multiple resources with one failure during builder phase.
- # Pre-Condition: No configmaps
- kube::test::get_object_assert configmaps "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+ # Pre-Condition: No configmaps with name=foo
+ kube::test::get_object_assert 'configmaps --field-selector=metadata.name=foo' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Apply a configmap and a bogus custom resource.
output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
# Should be error message from bogus custom resource.
diff --git a/test/cmd/create.sh b/test/cmd/create.sh
index cc393ca2fe4..20f0e8add96 100755
--- a/test/cmd/create.sh
+++ b/test/cmd/create.sh
@@ -134,8 +134,8 @@ run_kubectl_create_kustomization_directory_tests() {
set -o errexit
## kubectl create -k for kustomization directory
- # Pre-condition: no ConfigMap, Deployment, Service exist
- kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+ # Pre-Condition: No configmaps with name=test-the-map, no Deployments or Services exist
+ kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
diff --git a/test/cmd/delete.sh b/test/cmd/delete.sh
index d16b01ed8ed..8de3fdc4255 100755
--- a/test/cmd/delete.sh
+++ b/test/cmd/delete.sh
@@ -37,17 +37,17 @@ run_kubectl_delete_allnamespaces_tests() {
kubectl delete configmap --dry-run=client -l deletetest=true --all-namespaces
kubectl delete configmap --dry-run=server -l deletetest=true --all-namespaces
kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
- kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
+ kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
- kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'
+ kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'
kubectl delete configmap -l deletetest=true --all-namespaces
- # no configmaps should be in either of those namespaces
+ # no configmaps with label deletetest should remain in either of those namespaces
kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
- kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+ kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
- kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+ kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
set +o nounset
set +o errexit
diff --git a/test/cmd/get.sh b/test/cmd/get.sh
index d35d385cf8e..de0917d7281 100755
--- a/test/cmd/get.sh
+++ b/test/cmd/get.sh
@@ -206,8 +206,8 @@ run_kubectl_get_tests() {
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
### Test 'kubectl get -k ' prints all the items built from a kustomization directory
- # Pre-condition: no ConfigMap, Deployment, Service exist
- kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+ # Pre-Condition: No configmaps with name=test-the-map, no Deployments or Services exist
+ kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -224,7 +224,7 @@ run_kubectl_get_tests() {
kubectl delete -k hack/testdata/kustomize
# Check that all items in the list are deleted
- kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+ kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go
index 674559d86cd..94ce734da99 100644
--- a/test/e2e/auth/service_accounts.go
+++ b/test/e2e/auth/service_accounts.go
@@ -582,20 +582,6 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})
ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() {
- cfg, err := framework.LoadConfig()
- framework.ExpectNoError(err)
-
- if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "kube-root-ca.crt",
- },
- Data: map[string]string{
- "ca.crt": string(cfg.TLSClientConfig.CAData),
- },
- }, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
- framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
- }
-
tenMin := int64(10 * 60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
@@ -655,7 +641,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}},
},
}
- pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+ pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.Logf("created pod")
@@ -870,6 +856,69 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Deleted)
})
+
+ ginkgo.It("should guarantee kube-root-ca.crt exist in any namespace", func() {
+ const rootCAConfigMapName = "kube-root-ca.crt"
+
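+ // The root CA cert publisher should create the configmap in every namespace,
+ // including this newly created one.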
+ framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+ _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+ if err == nil {
+ return true, nil
+ }
+ if apierrors.IsNotFound(err) {
+ ginkgo.By("root ca configmap not found, retrying")
+ return false, nil
+ }
+ return false, err
+ }))
+ framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)
+
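+ // Delete the configmap and expect the publisher to recreate it.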
+ framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)}))
+ framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name)
+
+ framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+ ginkgo.By("waiting for a new root ca configmap created")
+ _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+ if err == nil {
+ return true, nil
+ }
+ if apierrors.IsNotFound(err) {
+ ginkgo.By("root ca configmap not found, retrying")
+ return false, nil
+ }
+ return false, err
+ }))
+ framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name)
+
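+ // Clear the ca.crt data and expect the publisher to reconcile it.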
+ _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: rootCAConfigMapName,
+ },
+ Data: map[string]string{
+ "ca.crt": "",
+ },
+ }, metav1.UpdateOptions{})
+ framework.ExpectNoError(err)
+ framework.Logf("Updated root ca configmap in namespace %q", f.Namespace.Name)
+
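+ // Wait until the publisher restores a non-empty ca.crt value.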
+ framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+ ginkgo.By("waiting for the root ca configmap reconciled")
+ cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ ginkgo.By("root ca configmap not found, retrying")
+ return false, nil
+ }
+ return false, err
+ }
+ if value, ok := cm.Data["ca.crt"]; !ok || value == "" {
+ ginkgo.By("root ca configmap is not reconciled yet, retrying")
+ return false, nil
+ }
+ return true, nil
+ }))
+ framework.Logf("Reconciled root ca configmap in namespace %q", f.Namespace.Name)
+ })
})
var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$")