Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 01:40:13 +00:00)

Commit d40f0c43c4: separate RootCAConfigMap from BoundServiceAccountTokenVolume
Parent: f75316e294
@@ -193,7 +193,7 @@ func startCSRCleanerController(ctx ControllerContext) (http.Handler, bool, error
 }
 
 func startRootCACertPublisher(ctx ControllerContext) (http.Handler, bool, error) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+	if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
 		return nil, false, nil
 	}
 
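The guard above is the usual controller-manager start pattern: each start function reports whether its controller ran and declines quietly when its feature gate is off. For context, a minimal self-contained sketch of that pattern; the map-based gate and the function body are illustrative stand-ins, not the kube-controller-manager wiring.

package main

import "fmt"

// featureGates stands in for utilfeature.DefaultFeatureGate used in the hunk above.
var featureGates = map[string]bool{
	"RootCAConfigMap":                true,  // beta and on by default as of this commit
	"BoundServiceAccountTokenVolume": false, // no longer required just to publish the CA
}

// startRootCACertPublisherSketch mirrors the guard: bail out early (not an
// error) when the gate is disabled, otherwise start the controller.
func startRootCACertPublisherSketch() (started bool, err error) {
	if !featureGates["RootCAConfigMap"] {
		return false, nil
	}
	// ...construct the publisher and run it in a goroutine here...
	return true, nil
}

func main() {
	started, _ := startRootCACertPublisherSketch()
	fmt.Println("root-ca-cert-publisher started:", started)
}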
@@ -200,6 +200,8 @@ func (c *Publisher) syncNamespace(ns string) error {
 		return nil
 	}
 
+	// copy so we don't modify the cache's instance of the configmap
+	cm = cm.DeepCopy()
 	cm.Data = data
 
 	_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
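The two added lines follow the standard informer rule: objects returned by a lister are shared with the watch cache, so they must be deep-copied before mutation. A trimmed sketch of the same step, assuming a client-go clientset and ConfigMap lister; the function and parameter names are illustrative, not the publisher's actual fields, and the real controller also creates the configmap when it is missing.

package publisher

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	corev1listers "k8s.io/client-go/listers/core/v1"
)

// syncRootCASketch updates kube-root-ca.crt in one namespace without touching
// the informer cache's copy of the object.
func syncRootCASketch(client kubernetes.Interface, lister corev1listers.ConfigMapLister, ns string, data map[string]string) error {
	cm, err := lister.ConfigMaps(ns).Get("kube-root-ca.crt")
	if err != nil {
		return err // the real controller handles NotFound by creating the configmap
	}
	// copy so we don't modify the cache's instance of the configmap
	cm = cm.DeepCopy()
	cm.Data = data
	_, err = client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
	return err
}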
@@ -643,6 +643,14 @@ const (
 	// Add support for the HPA to scale based on metrics from individual containers
 	// in target pods
 	HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"
+
+	// owner: @zshihang
+	// alpha: v1.13
+	// beta: v1.20
+	//
+	// Allows kube-controller-manager to publish kube-root-ca.crt configmap to
+	// every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
+	RootCAConfigMap featuregate.Feature = "RootCAConfigMap"
 )
 
 func init() {
@@ -740,6 +748,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	WinDSR: {Default: false, PreRelease: featuregate.Alpha},
 	DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
 	HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
+	RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},
 
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
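The constant above plus its entry in defaultKubernetesFeatureGates is all a gate needs: a name, a maturity level, and a default. A standalone sketch of that registration and lookup using k8s.io/component-base/featuregate; the private gate instance here is an assumption for illustration, not the shared DefaultFeatureGate that the diff touches.

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const (
	RootCAConfigMap                featuregate.Feature = "RootCAConfigMap"
	BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume"
)

func main() {
	// Register defaults on a mutable gate, the same shape as the map in the hunk above.
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		RootCAConfigMap:                {Default: true, PreRelease: featuregate.Beta},
		BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	// Components then ask the gate at runtime, as startRootCACertPublisher does.
	fmt.Println("RootCAConfigMap:", gate.Enabled(RootCAConfigMap))
	fmt.Println("BoundServiceAccountTokenVolume:", gate.Enabled(BoundServiceAccountTokenVolume))
}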
@@ -199,6 +199,9 @@ func (o *BuiltInAuthenticationOptions) Validate() []error {
 		}
 	}
 	if o.ServiceAccounts != nil && utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+		if !utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
+			allErrors = append(allErrors, errors.New("BoundServiceAccountTokenVolume feature depends on RootCAConfigMap feature, but RootCAConfigMap features is not enabled"))
+		}
 		if len(o.ServiceAccounts.Issuer) == 0 {
 			allErrors = append(allErrors, errors.New("service-account-issuer is a required flag when BoundServiceAccountTokenVolume is enabled"))
 		}
@@ -402,7 +402,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 		})
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.RootCAConfigMap) {
 		addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
 			ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "root-ca-cert-publisher"},
 			Rules: []rbacv1.PolicyRule{
@@ -391,6 +391,23 @@ items:
   - kind: ServiceAccount
     name: resourcequota-controller
     namespace: kube-system
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    annotations:
+      rbac.authorization.kubernetes.io/autoupdate: "true"
+    creationTimestamp: null
+    labels:
+      kubernetes.io/bootstrapping: rbac-defaults
+    name: system:controller:root-ca-cert-publisher
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: ClusterRole
+    name: system:controller:root-ca-cert-publisher
+  subjects:
+  - kind: ServiceAccount
+    name: root-ca-cert-publisher
+    namespace: kube-system
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRoleBinding
   metadata:
@@ -1195,6 +1195,32 @@ items:
     - create
     - patch
     - update
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRole
+  metadata:
+    annotations:
+      rbac.authorization.kubernetes.io/autoupdate: "true"
+    creationTimestamp: null
+    labels:
+      kubernetes.io/bootstrapping: rbac-defaults
+    name: system:controller:root-ca-cert-publisher
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - configmaps
+    verbs:
+    - create
+    - update
+  - apiGroups:
+    - ""
+    - events.k8s.io
+    resources:
+    - events
+    verbs:
+    - create
+    - patch
+    - update
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
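These bootstrap RBAC objects let the root-ca-cert-publisher service account create and update configmaps (and emit events) cluster-wide, which is what keeps kube-root-ca.crt present in every namespace. A rough client-go sketch of a consumer reading the published bundle; the namespace is an example, rest.InClusterConfig assumes the code runs in a pod, and the pod's own service account must be allowed to get configmaps.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// "default" is only an example; the publisher maintains this configmap in every namespace.
	cm, err := client.CoreV1().ConfigMaps("default").Get(context.TODO(), "kube-root-ca.crt", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("ca.crt is %d bytes\n", len(cm.Data["ca.crt"]))
}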
@@ -310,8 +310,8 @@ __EOF__
   kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
 
   ## kubectl apply multiple resources with one failure during builder phase.
-  # Pre-Condition: No configmaps
-  kube::test::get_object_assert configmaps "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  # Pre-Condition: No configmaps with name=foo
+  kube::test::get_object_assert 'configmaps --field-selector=metadata.name=foo' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Apply a configmap and a bogus custom resource.
   output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
   # Should be error message from bogus custom resource.
@@ -134,8 +134,8 @@ run_kubectl_create_kustomization_directory_tests() {
   set -o errexit
 
   ## kubectl create -k <dir> for kustomization directory
-  # Pre-condition: no ConfigMap, Deployment, Service exist
-  kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Pre-Condition: No configmaps with name=test-the-map, no Deployment, Service exist
+  kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
@@ -37,17 +37,17 @@ run_kubectl_delete_allnamespaces_tests() {
   kubectl delete configmap --dry-run=client -l deletetest=true --all-namespaces
   kubectl delete configmap --dry-run=server -l deletetest=true --all-namespaces
   kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
-  kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
+  kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'one:'
   kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
-  kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'
+  kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" 'two:'
 
   kubectl delete configmap -l deletetest=true --all-namespaces
 
-  # no configmaps should be in either of those namespaces
+  # no configmaps should be in either of those namespaces with label deletetest
   kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
-  kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
-  kube::test::get_object_assert configmap "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert 'configmap -l deletetest' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
 
   set +o nounset
   set +o errexit
@@ -206,8 +206,8 @@ run_kubectl_get_tests() {
   kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
 
   ### Test 'kubectl get -k <dir>' prints all the items built from a kustomization directory
-  # Pre-condition: no ConfigMap, Deployment, Service exist
-  kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Pre-Condition: No configmaps with name=test-the-map, no Deployment, Service exist
+  kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
@@ -224,7 +224,7 @@ run_kubectl_get_tests() {
   kubectl delete -k hack/testdata/kustomize
 
   # Check that all items in the list are deleted
-  kube::test::get_object_assert configmaps "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert 'configmaps --field-selector=metadata.name=test-the-map' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
 
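All of the test/cmd changes above share one cause: with RootCAConfigMap on by default, every namespace contains a kube-root-ca.crt configmap, so bare "no configmaps exist" preconditions would fail, and the assertions now narrow their scope with a field selector or label selector. The equivalent filter through client-go looks roughly like this; the namespace and name are the test's examples, not required values.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// List only configmaps named test-the-map, ignoring the auto-published kube-root-ca.crt.
	list, err := client.CoreV1().ConfigMaps("default").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "metadata.name=test-the-map",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("matching configmaps:", len(list.Items))
}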
@@ -582,20 +582,6 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 	})
 
 	ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() {
-		cfg, err := framework.LoadConfig()
-		framework.ExpectNoError(err)
-
-		if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "kube-root-ca.crt",
-			},
-			Data: map[string]string{
-				"ca.crt": string(cfg.TLSClientConfig.CAData),
-			},
-		}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
-			framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
-		}
-
 		tenMin := int64(10 * 60)
 		pod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
@@ -655,7 +641,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 				}},
 			},
 		}
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
 		framework.Logf("created pod")
@@ -870,6 +856,69 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 		}
 		framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Deleted)
 	})
+
+	ginkgo.It("should guarantee kube-root-ca.crt exist in any namespace", func() {
+		const rootCAConfigMapName = "kube-root-ca.crt"
+
+		framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+			if err == nil {
+				return true, nil
+			}
+			if apierrors.IsNotFound(err) {
+				ginkgo.By("root ca configmap not found, retrying")
+				return false, nil
+			}
+			return false, err
+		}))
+		framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)
+
+		framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)}))
+		framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name)
+
+		framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+			ginkgo.By("waiting for a new root ca configmap created")
+			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+			if err == nil {
+				return true, nil
+			}
+			if apierrors.IsNotFound(err) {
+				ginkgo.By("root ca configmap not found, retrying")
+				return false, nil
+			}
+			return false, err
+		}))
+		framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name)
+
+		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: rootCAConfigMapName,
+			},
+			Data: map[string]string{
+				"ca.crt": "",
+			},
+		}, metav1.UpdateOptions{})
+		framework.ExpectNoError(err)
+		framework.Logf("Updated root ca configmap in namespace %q", f.Namespace.Name)
+
+		framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+			ginkgo.By("waiting for the root ca configmap reconciled")
+			cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					ginkgo.By("root ca configmap not found, retrying")
+					return false, nil
+				}
+				return false, err
+			}
+			if value, ok := cm.Data["ca.crt"]; !ok || value == "" {
+				ginkgo.By("root ca configmap is not reconciled yet, retrying")
+				return false, nil
+			}
+			return true, nil
+		}))
+		framework.Logf("Reconciled root ca configmap in namespace %q", f.Namespace.Name)
+	})
 })
 
 var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$")
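The new e2e case exercises the publisher end to end: kube-root-ca.crt must appear in a fresh namespace, be recreated after deletion, and be reconciled after its ca.crt data is blanked. On the consumer side, BoundServiceAccountTokenVolume projects that same bundle into pods at the standard service-account path; below is a rough sketch of trusting the API server with it. The /healthz probe is only an example endpoint and may require authentication depending on cluster policy, and client-go's rest.InClusterConfig normally does all of this (plus the projected token) for you.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Path where the projected service-account volume mounts the published CA bundle.
	ca, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(ca) {
		panic("no certificates found in ca.crt")
	}
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}}
	// Example probe against the in-cluster API server address.
	resp, err := client.Get("https://kubernetes.default.svc/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("API server /healthz:", resp.Status)
}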