mirror of https://github.com/k3s-io/kubernetes.git
track legacy service account tokens
This commit is contained in:
parent
7ad4b04632
commit
569cd70a52
211 pkg/controlplane/controller/legacytokentracking/controller.go (new file)
@@ -0,0 +1,211 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package legacytokentracking

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/utils/clock"
)

const (
    ConfigMapName    = "kube-apiserver-legacy-service-account-token-tracking"
    ConfigMapDataKey = "since"
    dateFormat       = "2006-01-02"
)

var (
    queueKey = metav1.NamespaceSystem + "/" + ConfigMapName
)

// Controller maintains a timestamp-value configmap
// `kube-apiserver-legacy-service-account-token-tracking` in `kube-system` to
// indicate whether tracking of legacy tokens is enabled in the cluster. For HA
// clusters, the configmap will eventually be created once all controller
// instances have enabled the feature. When the feature is disabled, the
// existing configmap is deleted.
type Controller struct {
    configMapClient   corev1client.ConfigMapsGetter
    configMapInformer cache.SharedIndexInformer
    configMapCache    cache.Indexer
    configMapSynced   cache.InformerSynced
    queue             workqueue.RateLimitingInterface

    // enabled controls the behavior of the controller: if enabled is true, the
    // configmap will be created; otherwise, the configmap will be deleted.
    enabled bool
    // creationRatelimiter controls how frequently the configmap may be created.
    // This is useful in a multi-apiserver cluster to keep the configmap from
    // existing while the cluster still has a mix of enabled and disabled
    // controllers; otherwise, those apiservers would fight over creating and
    // deleting it until all apiservers are enabled or disabled.
    creationRatelimiter *rate.Limiter
    clock               clock.Clock
}

// NewController returns a Controller struct.
func NewController(cs kubernetes.Interface) *Controller {
    return newController(cs, clock.RealClock{}, rate.NewLimiter(rate.Every(30*time.Minute), 1))
}

func newController(cs kubernetes.Interface, cl clock.Clock, limiter *rate.Limiter) *Controller {
    informer := corev1informers.NewFilteredConfigMapInformer(cs, metav1.NamespaceSystem, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(options *metav1.ListOptions) {
        options.FieldSelector = fields.OneTermEqualSelector("metadata.name", ConfigMapName).String()
    })

    c := &Controller{
        configMapClient:     cs.CoreV1(),
        queue:               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "legacy_token_tracking_controller"),
        configMapInformer:   informer,
        configMapCache:      informer.GetIndexer(),
        configMapSynced:     informer.HasSynced,
        enabled:             utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyServiceAccountTokenTracking),
        creationRatelimiter: limiter,
        clock:               cl,
    }

    informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            c.enqueue()
        },
        UpdateFunc: func(oldObj, newObj interface{}) {
            c.enqueue()
        },
        DeleteFunc: func(obj interface{}) {
            c.enqueue()
        },
    })

    return c
}

func (c *Controller) enqueue() {
    c.queue.Add(queueKey)
}

// Run starts the controller sync loop.
func (c *Controller) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer c.queue.ShutDown()

    klog.Info("Starting legacy_token_tracking_controller")
    defer klog.Infof("Shutting down legacy_token_tracking_controller")

    go c.configMapInformer.Run(stopCh)
    if !cache.WaitForNamedCacheSync("configmaps", stopCh, c.configMapSynced) {
        return
    }

    go wait.Until(c.runWorker, time.Second, stopCh)

    c.queue.Add(queueKey)

    <-stopCh
    klog.Info("Ending legacy_token_tracking_controller")
}

func (c *Controller) runWorker() {
    for c.processNext() {
    }
}

func (c *Controller) processNext() bool {
    key, quit := c.queue.Get()
    if quit {
        return false
    }
    defer c.queue.Done(key)

    if err := c.syncConfigMap(); err != nil {
        utilruntime.HandleError(fmt.Errorf("while syncing ConfigMap %q, err: %w", key, err))
        c.queue.AddRateLimited(key)
        return true
    }
    c.queue.Forget(key)
    return true
}

func (c *Controller) syncConfigMap() error {
    obj, exists, err := c.configMapCache.GetByKey(queueKey)
    if err != nil {
        return err
    }

    now := c.clock.Now()
    switch {
    case c.enabled:
        if !exists {
            r := c.creationRatelimiter.ReserveN(now, 1)
            if delay := r.DelayFrom(now); delay > 0 {
                c.queue.AddAfter(queueKey, delay)
                r.CancelAt(now)
                return nil
            }

            if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: now.UTC().Format(dateFormat)},
            }, metav1.CreateOptions{}); err != nil {
                if apierrors.IsAlreadyExists(err) {
                    return nil
                }
                // don't consume the creationRatelimiter for an unsuccessful attempt
                r.CancelAt(now)
                return err
            }
        } else {
            configMap := obj.(*corev1.ConfigMap)
            if _, err = time.Parse(dateFormat, configMap.Data[ConfigMapDataKey]); err != nil {
                configMap := configMap.DeepCopy()
                configMap.Data[ConfigMapDataKey] = now.UTC().Format(dateFormat)
                if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
                    if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
                        return nil
                    }
                    return err
                }
            }
        }

    case !c.enabled:
        if exists && obj.(*corev1.ConfigMap).DeletionTimestamp == nil {
            if err := c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), ConfigMapName, metav1.DeleteOptions{}); err != nil {
                if apierrors.IsNotFound(err) {
                    return nil
                }
                return err
            }
        }
    }
    return nil
}
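
The controller above only writes a single date-stamped key into kube-system. As a minimal standalone sketch (not part of this commit), this is how a client could read back the ConfigMap it maintains; the kubeconfig lookup and the bare error handling are assumptions for illustration only.

package main

import (
    "context"
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumes a reachable cluster via the default kubeconfig location.
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // The controller stores the date tracking began under the "since" key,
    // formatted as 2006-01-02, in the kube-system namespace.
    cm, err := client.CoreV1().ConfigMaps("kube-system").Get(context.TODO(),
        "kube-apiserver-legacy-service-account-token-tracking", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    since, err := time.Parse("2006-01-02", cm.Data["since"])
    if err != nil {
        panic(err)
    }
    fmt.Printf("legacy token tracking enabled since %s\n", since.Format("2006-01-02"))
}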

@@ -0,0 +1,195 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package legacytokentracking

import (
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"
    "golang.org/x/time/rate"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"
    testingclock "k8s.io/utils/clock/testing"
)

const throttlePeriod = 30 * time.Second

func TestSyncConfigMap(t *testing.T) {
    now := time.Now().UTC()
    tests := []struct {
        name              string
        enabled           bool
        nextCreateAt      []time.Time
        clientObjects     []runtime.Object
        existingConfigMap *corev1.ConfigMap

        expectedErr     error
        expectedActions []core.Action
    }{
        {
            name:          "create configmap [no cache, no live object]",
            enabled:       true,
            clientObjects: []runtime.Object{},
            expectedActions: []core.Action{
                core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
            },
        },
        {
            name:    "create configmap should ignore AlreadyExists error [no cache, live object exists]",
            enabled: true,
            clientObjects: []runtime.Object{
                &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}},
            },
            expectedActions: []core.Action{
                core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
            },
        },
        {
            name:          "create configmap throttled [no cache, no live object]",
            enabled:       true,
            nextCreateAt:  []time.Time{now.Add(throttlePeriod - 2*time.Second), now.Add(throttlePeriod - time.Second)},
            clientObjects: []runtime.Object{},
            expectedActions: []core.Action{
                core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
            },
        },
        {
            name:          "create configmap after throttle period [no cache, no live object]",
            enabled:       true,
            nextCreateAt:  []time.Time{now.Add(throttlePeriod - 2*time.Second), now.Add(throttlePeriod - time.Second), now.Add(throttlePeriod + time.Second)},
            clientObjects: []runtime.Object{},
            expectedActions: []core.Action{
                core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
                core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Add(throttlePeriod + time.Second).Format(dateFormat)}}),
            },
        },
        {
            name:    "skip update configmap [cache with expected date format exists, live object exists]",
            enabled: true,
            clientObjects: []runtime.Object{
                &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}},
            },
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
            },
            expectedActions: []core.Action{},
        },
        {
            name:    "update configmap [cache with unexpected date format, live object exists]",
            enabled: true,
            clientObjects: []runtime.Object{
                &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
            },
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)},
            },
            expectedActions: []core.Action{
                core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
            },
        },
        {
            name:    "update configmap should ignore NotFound error [cache with unexpected date format, no live object]",
            enabled: true,
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: "BAD_TIMESTAMP"},
            },
            expectedActions: []core.Action{
                core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
            },
        },
        {
            name:            "delete configmap [no cache, no live object]",
            expectedActions: []core.Action{},
        },
        {
            name: "delete configmap [cache exists, live object exists]",
            clientObjects: []runtime.Object{
                &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
            },
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
            },
            expectedActions: []core.Action{
                core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, ConfigMapName),
            },
        },
        {
            name: "delete configmap that's already being deleted [cache exists, live object exists]",
            clientObjects: []runtime.Object{
                &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
            },
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName, DeletionTimestamp: &metav1.Time{Time: time.Now()}},
                Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
            },
            expectedActions: []core.Action{},
        },
        {
            name: "delete configmap should ignore NotFound error [cache exists, no live object]",
            existingConfigMap: &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
            },
            expectedActions: []core.Action{
                core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, ConfigMapName),
            },
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyServiceAccountTokenTracking, test.enabled)()

            client := fake.NewSimpleClientset(test.clientObjects...)
            limiter := rate.NewLimiter(rate.Every(throttlePeriod), 1)
            controller := newController(client, testingclock.NewFakeClock(now), limiter)
            if test.existingConfigMap != nil {
                controller.configMapCache.Add(test.existingConfigMap)
            }

            if err := controller.syncConfigMap(); err != nil {
                t.Errorf("Failed to sync ConfigMap, err: %v", err)
            }

            for _, createAt := range test.nextCreateAt {
                // delete the existing configmap to trigger a second create
                controller.configMapCache.Delete(&corev1.ConfigMap{
                    ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
                })
                controller.clock.(*testingclock.FakeClock).SetTime(createAt)
                if err := controller.syncConfigMap(); err != nil {
                    t.Errorf("Failed to sync ConfigMap, err: %v", err)
                }
            }

            if diff := cmp.Diff(test.expectedActions, client.Actions()); diff != "" {
                t.Errorf("Unexpected diff (-want +got):\n%s", diff)
            }
        })
    }
}
@@ -79,6 +79,7 @@ import (
    flowcontrolv1beta3 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta3"
    "k8s.io/kubernetes/pkg/controlplane/controller/apiserverleasegc"
    "k8s.io/kubernetes/pkg/controlplane/controller/clusterauthenticationtrust"
    "k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking"
    "k8s.io/kubernetes/pkg/controlplane/reconcilers"
    kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
    kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
@@ -496,6 +497,15 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
        })
    }

    m.GenericAPIServer.AddPostStartHookOrDie("start-legacy-token-tracking-controller", func(hookContext genericapiserver.PostStartHookContext) error {
        kubeClient, err := kubernetes.NewForConfig(hookContext.LoopbackClientConfig)
        if err != nil {
            return err
        }
        go legacytokentracking.NewController(kubeClient).Run(hookContext.StopCh)
        return nil
    })

    return m, nil
}
@@ -495,6 +495,13 @@ const (
    // Stop auto-generation of secret-based service account tokens.
    LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration"

    // owner: @zshihang
    // kep: http://kep.k8s.io/2800
    // alpha: v1.25
    //
    // Enables tracking of secret-based service account tokens usage.
    LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking"

    // owner: @jinxu
    // beta: v1.10
    // stable: v1.25
@@ -932,6 +939,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

    LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.Beta},

    LegacyServiceAccountTokenTracking: {Default: false, PreRelease: featuregate.Alpha},

    LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

    LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
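
For context, a minimal sketch (not part of this commit) of the featuregate machinery these entries hook into: a gate is registered with its default, may be overridden (for example via --feature-gates=LegacyServiceAccountTokenTracking=true on the apiserver command line), and is consulted at runtime the same way the new controller and validator consult utilfeature.DefaultFeatureGate. The locally constructed gate below is purely illustrative.

package main

import (
    "fmt"

    "k8s.io/component-base/featuregate"
)

const LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking"

func main() {
    gate := featuregate.NewFeatureGate()
    // Register the gate with its alpha default (off), mirroring the entry above.
    if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        LegacyServiceAccountTokenTracking: {Default: false, PreRelease: featuregate.Alpha},
    }); err != nil {
        panic(err)
    }
    // Equivalent in effect to --feature-gates=LegacyServiceAccountTokenTracking=true.
    if err := gate.Set("LegacyServiceAccountTokenTracking=true"); err != nil {
        panic(err)
    }
    fmt.Println("enabled:", gate.Enabled(LegacyServiceAccountTokenTracking))
}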
@@ -38,6 +38,7 @@ import (
    webhookutil "k8s.io/apiserver/pkg/util/webhook"
    "k8s.io/apiserver/plugin/pkg/authenticator/token/oidc"
    "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook"
    typedv1core "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/kube-openapi/pkg/validation/spec"

    // Initialize all known client auth plugins.
@@ -80,6 +81,7 @@ type Config struct {

    // TODO, this is the only non-serializable part of the entire config. Factor it out into a clientconfig
    ServiceAccountTokenGetter   serviceaccount.ServiceAccountTokenGetter
    SecretsWriter               typedv1core.SecretsGetter
    BootstrapTokenAuthenticator authenticator.Token
    // ClientCAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users.
    // Generally this is the CA bundle file used to authenticate client certificates
@@ -125,7 +127,7 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er
        tokenAuthenticators = append(tokenAuthenticators, authenticator.WrapAudienceAgnosticToken(config.APIAudiences, tokenAuth))
    }
    if len(config.ServiceAccountKeyFiles) > 0 {
-       serviceAccountAuth, err := newLegacyServiceAccountAuthenticator(config.ServiceAccountKeyFiles, config.ServiceAccountLookup, config.APIAudiences, config.ServiceAccountTokenGetter)
+       serviceAccountAuth, err := newLegacyServiceAccountAuthenticator(config.ServiceAccountKeyFiles, config.ServiceAccountLookup, config.APIAudiences, config.ServiceAccountTokenGetter, config.SecretsWriter)
        if err != nil {
            return nil, nil, err
        }
@@ -266,7 +268,7 @@ func newAuthenticatorFromOIDCIssuerURL(opts oidc.Options) (authenticator.Token,
}

// newLegacyServiceAccountAuthenticator returns an authenticator.Token or an error
-func newLegacyServiceAccountAuthenticator(keyfiles []string, lookup bool, apiAudiences authenticator.Audiences, serviceAccountGetter serviceaccount.ServiceAccountTokenGetter) (authenticator.Token, error) {
+func newLegacyServiceAccountAuthenticator(keyfiles []string, lookup bool, apiAudiences authenticator.Audiences, serviceAccountGetter serviceaccount.ServiceAccountTokenGetter, secretsWriter typedv1core.SecretsGetter) (authenticator.Token, error) {
    allPublicKeys := []interface{}{}
    for _, keyfile := range keyfiles {
        publicKeys, err := keyutil.PublicKeysFromFile(keyfile)
@@ -276,7 +278,7 @@ func newLegacyServiceAccountAuthenticator(keyfiles []string, lookup bool, apiAud
        allPublicKeys = append(allPublicKeys, publicKeys...)
    }

-   tokenAuthenticator := serviceaccount.JWTTokenAuthenticator([]string{serviceaccount.LegacyIssuer}, allPublicKeys, apiAudiences, serviceaccount.NewLegacyValidator(lookup, serviceAccountGetter))
+   tokenAuthenticator := serviceaccount.JWTTokenAuthenticator([]string{serviceaccount.LegacyIssuer}, allPublicKeys, apiAudiences, serviceaccount.NewLegacyValidator(lookup, serviceAccountGetter, secretsWriter))
    return tokenAuthenticator, nil
}
@@ -37,7 +37,6 @@ import (
    cliflag "k8s.io/component-base/cli/flag"
    "k8s.io/klog/v2"
    openapicommon "k8s.io/kube-openapi/pkg/common"

    serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
    kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
    authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
@@ -484,6 +483,7 @@ func (o *BuiltInAuthenticationOptions) ApplyTo(authInfo *genericapiserver.Authen
        versionedInformer.Core().V1().ServiceAccounts().Lister(),
        versionedInformer.Core().V1().Pods().Lister(),
    )
    authenticatorConfig.SecretsWriter = extclient.CoreV1()

    authenticatorConfig.BootstrapTokenAuthenticator = bootstrap.NewTokenAuthenticator(
        versionedInformer.Core().V1().Secrets().Lister().Secrets(metav1.NamespaceSystem),
@@ -342,7 +342,7 @@ func TestTokenGenerateAndValidate(t *testing.T) {
            return tc.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
        })),
    )
-   authn := serviceaccount.JWTTokenAuthenticator([]string{serviceaccount.LegacyIssuer, "bar"}, tc.Keys, auds, serviceaccount.NewLegacyValidator(tc.Client != nil, getter))
+   authn := serviceaccount.JWTTokenAuthenticator([]string{serviceaccount.LegacyIssuer, "bar"}, tc.Keys, auds, serviceaccount.NewLegacyValidator(tc.Client != nil, getter, nil))

    // An invalid, non-JWT token should always fail
    ctx := authenticator.WithAudiences(context.Background(), auds)
@@ -19,14 +19,22 @@ package serviceaccount

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "time"

    "gopkg.in/square/go-jose.v2/jwt"
-   "k8s.io/klog/v2"

-   "k8s.io/api/core/v1"
+   v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/apiserver/pkg/warning"
    applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
    typedv1core "k8s.io/client-go/kubernetes/typed/core/v1"
+   "k8s.io/klog/v2"
    kubefeatures "k8s.io/kubernetes/pkg/features"
)

func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) {
@@ -40,7 +48,10 @@ func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Clai
    }
}

-const LegacyIssuer = "kubernetes/serviceaccount"
+const (
+   LegacyIssuer     = "kubernetes/serviceaccount"
+   LastUsedLabelKey = "kubernetes.io/legacy-token-last-used"
+)

type legacyPrivateClaims struct {
    ServiceAccountName string `json:"kubernetes.io/serviceaccount/service-account.name"`
@@ -49,16 +60,18 @@ type legacyPrivateClaims struct {
    Namespace string `json:"kubernetes.io/serviceaccount/namespace"`
}

-func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter) Validator {
+func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter, secretsWriter typedv1core.SecretsGetter) Validator {
    return &legacyValidator{
-       lookup: lookup,
-       getter: getter,
+       lookup:        lookup,
+       getter:        getter,
+       secretsWriter: secretsWriter,
    }
}

type legacyValidator struct {
-   lookup bool
-   getter ServiceAccountTokenGetter
+   lookup        bool
+   getter        ServiceAccountTokenGetter
+   secretsWriter typedv1core.SecretsGetter
}

var _ = Validator(&legacyValidator{})
@@ -126,6 +139,29 @@ func (v *legacyValidator) Validate(ctx context.Context, tokenData string, public
            klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
            return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
        }

        if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyServiceAccountTokenTracking) {
            for _, ref := range serviceAccount.Secrets {
                if ref.Name == secret.Name {
                    warning.AddWarning(ctx, "", "Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens.")
                    break
                }
            }
            now := time.Now().UTC()
            today := now.Format("2006-01-02")
            tomorrow := now.AddDate(0, 0, 1).Format("2006-01-02")
            lastUsed := secret.Labels[LastUsedLabelKey]
            if lastUsed != today && lastUsed != tomorrow {
                patchContent, err := json.Marshal(applyv1.Secret(secret.Name, secret.Namespace).WithLabels(map[string]string{LastUsedLabelKey: today}))
                if err != nil {
                    klog.Errorf("Failed to marshal legacy service account token tracking labels, err: %v", err)
                } else {
                    if _, err := v.secretsWriter.Secrets(namespace).Patch(ctx, secret.Name, types.MergePatchType, patchContent, metav1.PatchOptions{}); err != nil {
                        klog.Errorf("Failed to label legacy service account token secret with last-used, err: %v", err)
                    }
                }
            }
        }
    }

    return &apiserverserviceaccount.ServiceAccountInfo{
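
The validator above labels the secret through a JSON merge patch built from a client-go apply configuration. Below is a minimal standalone sketch (not part of this commit) of what that patch body looks like; the secret name, namespace, and date are placeholders.

package main

import (
    "encoding/json"
    "fmt"

    applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
    // Build the same kind of patch content the validator marshals before calling Patch.
    patchContent, err := json.Marshal(
        applyv1.Secret("default-token-abcde", "default").
            WithLabels(map[string]string{"kubernetes.io/legacy-token-last-used": "2022-10-31"}))
    if err != nil {
        panic(err)
    }
    // Prints a JSON document identifying the secret and carrying the new label;
    // on a merge patch the apiserver folds the label into the existing object.
    fmt.Println(string(patchContent))
}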
@@ -36,6 +36,7 @@ import (
    auditinternal "k8s.io/apiserver/pkg/apis/audit"
    "k8s.io/apiserver/pkg/audit"
    "k8s.io/apiserver/pkg/authentication/authenticator"
    "k8s.io/apiserver/pkg/warning"
    "k8s.io/klog/v2"
    "k8s.io/utils/clock"
)
@@ -59,6 +60,12 @@ type cacheRecord struct {
    // based on the current time, but that may be okay since cache TTLs are generally
    // small (seconds).
    annotations map[string]string
    warnings    []*cacheWarning
}

type cacheWarning struct {
    agent string
    text  string
}

type cachedTokenAuthenticator struct {
@@ -128,6 +135,9 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token
        for key, value := range record.annotations {
            audit.AddAuditAnnotation(ctx, key, value)
        }
        for _, w := range record.warnings {
            warning.AddWarning(ctx, w.agent, w.text)
        }
        return record.resp, true, nil
    }

@@ -184,6 +194,8 @@ func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, toke
        if audsOk {
            ctx = authenticator.WithAudiences(ctx, auds)
        }
        recorder := &recorder{}
        ctx = warning.WithWarningRecorder(ctx, recorder)

        // since this is shared work between multiple requests, we have no way of knowing if any
        // particular request supports audit annotations. thus we always attempt to record them.
@@ -192,6 +204,7 @@ func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, toke

        record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token)
        record.annotations = ev.Annotations
        record.warnings = recorder.extractWarnings()

        if !a.cacheErrs && record.err != nil {
            return record, nil
@@ -269,3 +282,24 @@ func toBytes(s string) []byte {
func toString(b []byte) string {
    return *(*string)(unsafe.Pointer(&b))
}

// simple recorder that only appends warnings
type recorder struct {
    mu       sync.Mutex
    warnings []*cacheWarning
}

// AddWarning adds a warning to the recorder.
func (r *recorder) AddWarning(agent, text string) {
    r.mu.Lock()
    defer r.mu.Unlock()
    r.warnings = append(r.warnings, &cacheWarning{agent: agent, text: text})
}

func (r *recorder) extractWarnings() []*cacheWarning {
    r.mu.Lock()
    defer r.mu.Unlock()
    warnings := r.warnings
    r.warnings = nil
    return warnings
}
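
A minimal sketch (not part of this commit) of the warning-recorder pattern used above: a warning.Recorder is attached to the context, any warning.AddWarning call further down the chain lands in it, and the captured warnings can be replayed later. The capturingRecorder type here is a hypothetical stand-in for the package's unexported, mutex-guarded recorder.

package main

import (
    "context"
    "fmt"

    "k8s.io/apiserver/pkg/warning"
)

// capturingRecorder implements warning.Recorder by collecting warning texts.
type capturingRecorder struct {
    warnings []string
}

func (r *capturingRecorder) AddWarning(agent, text string) {
    r.warnings = append(r.warnings, text)
}

func main() {
    rec := &capturingRecorder{}
    ctx := warning.WithWarningRecorder(context.Background(), rec)

    // A token validator deeper in the call chain would do something like this.
    warning.AddWarning(ctx, "", "Use tokens from the TokenRequest API instead of auto-generated secret-based tokens.")

    for _, w := range rec.warnings {
        fmt.Println("captured warning:", w)
    }
}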
@@ -24,7 +24,7 @@ import (
type key int

const (
-   // auditAnnotationsKey is the context key for the audit annotations.
+   // warningRecorderKey is the context key for the warning recorder.
    warningRecorderKey key = iota
)

@@ -41,6 +41,7 @@ type Recorder interface {
func WithWarningRecorder(ctx context.Context, recorder Recorder) context.Context {
    return context.WithValue(ctx, warningRecorderKey, recorder)
}

func warningRecorderFrom(ctx context.Context) (Recorder, bool) {
    recorder, ok := ctx.Value(warningRecorderKey).(Recorder)
    return recorder, ok
@@ -23,6 +23,7 @@ package serviceaccount

import (
    "context"
    "fmt"
    "sync"
    "testing"
    "time"

@@ -44,6 +45,7 @@ import (
    "k8s.io/kubernetes/pkg/controller"
    serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
    "k8s.io/kubernetes/pkg/controlplane"
    "k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/serviceaccount"
    serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
@@ -53,6 +55,8 @@ import (
const (
    readOnlyServiceAccountName  = "ro"
    readWriteServiceAccountName = "rw"

    dateFormat = "2006-01-02"
)

func TestServiceAccountAutoCreate(t *testing.T) {
@@ -310,6 +314,112 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
    doServiceAccountAPIRequests(t, rwClient, otherns, true, false, false)
}

func TestLegacyServiceAccountTokenTracking(t *testing.T) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenNoAutoGeneration, false)()
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenTracking, true)()
    c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t)
    defer stopFunc()
    if err != nil {
        t.Fatalf("failed to setup ServiceAccounts server: %v", err)
    }

    // create service account
    myns := "auth-ns"
    _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{})
    if err != nil && !apierrors.IsAlreadyExists(err) {
        t.Fatalf("could not create namespace: %v", err)
    }
    mysa, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{})
    if err != nil {
        t.Fatalf("Service Account not created: %v", err)
    }
    manualSecretName := "manual-token"
    manualSecret, err := createServiceAccountToken(c, mysa, myns, manualSecretName)
    if err != nil {
        t.Fatalf("Secret not created: %v", err)
    }

    autoSecretName, autoSecretTokenData, err := getReferencedServiceAccountToken(c, myns, readOnlyServiceAccountName, true)
    if err != nil {
        t.Fatal(err)
    }

    tests := []struct {
        name            string
        secretName      string
        secretTokenData string

        expectWarning bool
    }{
        {
            name:            "manually created legacy token",
            secretName:      manualSecretName,
            secretTokenData: string(manualSecret.Data[v1.ServiceAccountTokenKey]),
        },
        {
            name:            "auto created legacy token",
            secretName:      autoSecretName,
            secretTokenData: autoSecretTokenData,
            expectWarning:   true,
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            myConfig := *config
            wh := &warningHandler{}
            myConfig.WarningHandler = wh
            myConfig.BearerToken = string(test.secretTokenData)
            roClient := clientset.NewForConfigOrDie(&myConfig)
            dateBefore := time.Now().UTC().Format(dateFormat)
            go func() {
                doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
            }()
            doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
            dateAfter := time.Now().UTC().Format(dateFormat)
            liveSecret, err := c.CoreV1().Secrets(myns).Get(context.TODO(), test.secretName, metav1.GetOptions{})
            if err != nil {
                t.Fatalf("Could not get secret: %v", err)
            }

            if test.expectWarning && len(wh.warnings) != 8 {
                t.Fatalf("Expect 8 warnings, got %d", len(wh.warnings))
            }
            if !test.expectWarning && len(wh.warnings) != 0 {
                t.Fatalf("Don't expect warnings, got %d", len(wh.warnings))
            }

            // authenticated legacy token should have the expected annotation and label.
            date, ok := liveSecret.GetLabels()[serviceaccount.LastUsedLabelKey]
            if !ok {
                t.Fatalf("Secret wasn't labeled with %q", serviceaccount.LastUsedLabelKey)
            }
            if date != dateBefore || date != dateAfter {
                t.Fatalf("Secret was labeled with wrong date: %q", date)
            }
        })
    }

    // configmap should exist with 'since' timestamp.
    if err = wait.PollImmediate(time.Millisecond*10, wait.ForeverTestTimeout, func() (bool, error) {
        dateBefore := time.Now().UTC().Format("2006-01-02")
        configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), legacytokentracking.ConfigMapName, metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("failed to get %q configmap, err %w", legacytokentracking.ConfigMapDataKey, err)
        }
        dateAfter := time.Now().UTC().Format("2006-01-02")
        date, ok := configMap.Data[legacytokentracking.ConfigMapDataKey]
        if !ok {
            return false, fmt.Errorf("configMap doesn't contain key %q", legacytokentracking.ConfigMapDataKey)
        }
        if date != dateBefore || date != dateAfter {
            return false, fmt.Errorf("configMap contains a wrong date %q", date)
        }
        return true, nil
    }); err != nil {
        t.Fatal(err)
    }
}

// startServiceAccountTestServerAndWaitForCaches returns a started server
// It is the responsibility of the caller to ensure the returned stopFunc is called
func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Interface, *restclient.Config, func(), error) {
@@ -571,3 +681,14 @@ func doServiceAccountAPIRequests(t *testing.T, c clientset.Interface, ns string,
        }
    }
}

type warningHandler struct {
    mu       sync.Mutex
    warnings []string
}

func (r *warningHandler) HandleWarningHeader(code int, agent string, message string) {
    r.mu.Lock()
    defer r.mu.Unlock()
    r.warnings = append(r.warnings, message)
}
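
Once the validator starts stamping secrets, the kubernetes.io/legacy-token-last-used label can be used to find legacy tokens that were used on a given day. A minimal standalone sketch (not part of this commit); the client construction and the date value are assumptions for illustration.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // List secrets whose legacy token was last used on the given (placeholder) date.
    secrets, err := client.CoreV1().Secrets(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
        LabelSelector: "kubernetes.io/legacy-token-last-used=2022-10-31",
    })
    if err != nil {
        panic(err)
    }
    for _, s := range secrets.Items {
        fmt.Printf("%s/%s\n", s.Namespace, s.Name)
    }
}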