Merge pull request #117591 from zshihang/master

graduate LegacyServiceAccountTokenTracking to GA

commit d2dc976247
@@ -29,14 +29,12 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	corev1informers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/utils/clock"
 )
@@ -62,9 +60,6 @@ type Controller struct {
 	configMapSynced cache.InformerSynced
 	queue           workqueue.RateLimitingInterface
 
-	// enabled controls the behavior of the controller: if enabled is true, the
-	// configmap will be created; otherwise, the configmap will be deleted.
-	enabled bool
 	// rate limiter controls the rate limit of the creation of the configmap.
 	// this is useful in multi-apiserver cluster to prevent config existing in a
 	// cluster with mixed enabled/disabled controllers. otherwise, those
@@ -90,7 +85,6 @@ func newController(cs kubernetes.Interface, cl clock.Clock, limiter *rate.Limite
 		configMapInformer:   informer,
 		configMapCache:      informer.GetIndexer(),
 		configMapSynced:     informer.HasSynced,
-		enabled:             utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyServiceAccountTokenTracking),
 		creationRatelimiter: limiter,
 		clock:               cl,
 	}
@@ -163,53 +157,41 @@ func (c *Controller) syncConfigMap() error {
 	}
 
 	now := c.clock.Now()
-	switch {
-	case c.enabled:
-		if !exists {
-			r := c.creationRatelimiter.ReserveN(now, 1)
-			if delay := r.DelayFrom(now); delay > 0 {
-				c.queue.AddAfter(queueKey, delay)
-				r.CancelAt(now)
-				return nil
-			}
-
-			if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
-				Data:       map[string]string{ConfigMapDataKey: now.UTC().Format(dateFormat)},
-			}, metav1.CreateOptions{}); err != nil {
-				if apierrors.IsAlreadyExists(err) {
-					return nil
-				}
-				// don't consume the creationRatelimiter for an unsuccessful attempt
-				r.CancelAt(now)
-				return err
-			}
-		} else {
-			configMap := obj.(*corev1.ConfigMap)
-			if _, err = time.Parse(dateFormat, configMap.Data[ConfigMapDataKey]); err != nil {
-				configMap := configMap.DeepCopy()
-				if configMap.Data == nil {
-					configMap.Data = map[string]string{}
-				}
-				configMap.Data[ConfigMapDataKey] = now.UTC().Format(dateFormat)
-				if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
-					if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
-						return nil
-					}
-					return err
-				}
-			}
-		}
-	case !c.enabled:
-		if exists && obj.(*corev1.ConfigMap).DeletionTimestamp == nil {
-			if err := c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), ConfigMapName, metav1.DeleteOptions{}); err != nil {
-				if apierrors.IsNotFound(err) {
-					return nil
-				}
-				return err
-			}
-		}
-	}
+	if !exists {
+		r := c.creationRatelimiter.ReserveN(now, 1)
+		if delay := r.DelayFrom(now); delay > 0 {
+			c.queue.AddAfter(queueKey, delay)
+			r.CancelAt(now)
+			return nil
+		}
+
+		if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
+			Data:       map[string]string{ConfigMapDataKey: now.UTC().Format(dateFormat)},
+		}, metav1.CreateOptions{}); err != nil {
+			if apierrors.IsAlreadyExists(err) {
+				return nil
+			}
+			// don't consume the creationRatelimiter for an unsuccessful attempt
+			r.CancelAt(now)
+			return err
+		}
+	} else {
+		configMap := obj.(*corev1.ConfigMap)
+		if _, err = time.Parse(dateFormat, configMap.Data[ConfigMapDataKey]); err != nil {
+			configMap := configMap.DeepCopy()
+			if configMap.Data == nil {
+				configMap.Data = map[string]string{}
+			}
+			configMap.Data[ConfigMapDataKey] = now.UTC().Format(dateFormat)
+			if _, err = c.configMapClient.ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
+				if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
+					return nil
+				}
+				return err
+			}
+		}
+	}
 
 	return nil
 }
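Aside: even with the feature-gate switch removed, syncConfigMap keeps the reserve-or-requeue throttle above. A minimal, self-contained sketch of that pattern, assuming an illustrative createWithThrottle helper and a one-creation-per-minute limit (neither is code from this PR):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// createWithThrottle reserves one token from the limiter; if the token is not
// yet available, it requeues the key and cancels the reservation so the
// skipped attempt does not consume the creation budget.
func createWithThrottle(queue workqueue.RateLimitingInterface, limiter *rate.Limiter, key string, create func() error) error {
	now := time.Now()
	r := limiter.ReserveN(now, 1)
	if delay := r.DelayFrom(now); delay > 0 {
		r.CancelAt(now)
		queue.AddAfter(key, delay)
		return nil
	}
	if err := create(); err != nil {
		// don't consume the budget for an unsuccessful attempt
		r.CancelAt(now)
		return err
	}
	return nil
}

func main() {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	defer queue.ShutDown()
	limiter := rate.NewLimiter(rate.Every(time.Minute), 1) // at most one creation per minute

	_ = createWithThrottle(queue, limiter, "kube-system/configmap", func() error {
		fmt.Println("creating configmap")
		return nil
	})
}

Cancelling the reservation on requeue or failure mirrors the "don't consume the creationRatelimiter for an unsuccessful attempt" comment in the hunk above.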
@@ -27,11 +27,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	testingclock "k8s.io/utils/clock/testing"
 )
@@ -41,7 +38,6 @@ func TestSyncConfigMap(t *testing.T) {
 	now := time.Now().UTC()
 	tests := []struct {
 		name              string
-		enabled           bool
 		nextCreateAt      []time.Time
 		clientObjects     []runtime.Object
 		existingConfigMap *corev1.ConfigMap
@@ -51,15 +47,13 @@ func TestSyncConfigMap(t *testing.T) {
 	}{
 		{
 			name:          "create configmap [no cache, no live object]",
-			enabled:       true,
 			clientObjects: []runtime.Object{},
 			expectedActions: []core.Action{
 				core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
 			},
 		},
 		{
-			name:    "create configmap should ignore AlreadyExists error [no cache, live object exists]",
-			enabled: true,
+			name:          "create configmap should ignore AlreadyExists error [no cache, live object exists]",
 			clientObjects: []runtime.Object{
 				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}},
 			},
@@ -69,7 +63,6 @@ func TestSyncConfigMap(t *testing.T) {
 		},
 		{
 			name:          "create configmap throttled [no cache, no live object]",
-			enabled:       true,
 			nextCreateAt:  []time.Time{now.Add(throttlePeriod - 2*time.Second), now.Add(throttlePeriod - time.Second)},
 			clientObjects: []runtime.Object{},
 			expectedActions: []core.Action{
@@ -78,7 +71,6 @@ func TestSyncConfigMap(t *testing.T) {
 		},
 		{
 			name:          "create configmap after throttle period [no cache, no live object]",
-			enabled:       true,
 			nextCreateAt:  []time.Time{now.Add(throttlePeriod - 2*time.Second), now.Add(throttlePeriod - time.Second), now.Add(throttlePeriod + time.Second)},
 			clientObjects: []runtime.Object{},
 			expectedActions: []core.Action{
@@ -87,8 +79,7 @@ func TestSyncConfigMap(t *testing.T) {
 			},
 		},
 		{
-			name:    "skip update configmap [cache with expected date format exists, live object exists]",
-			enabled: true,
+			name:          "skip update configmap [cache with expected date format exists, live object exists]",
 			clientObjects: []runtime.Object{
 				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}},
 			},
@@ -99,8 +90,7 @@ func TestSyncConfigMap(t *testing.T) {
 			expectedActions: []core.Action{},
 		},
 		{
-			name:    "update configmap [cache with unexpected date format, live object exists]",
-			enabled: true,
+			name:          "update configmap [cache with unexpected date format, live object exists]",
 			clientObjects: []runtime.Object{
 				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
 			},
@@ -113,8 +103,7 @@ func TestSyncConfigMap(t *testing.T) {
 			},
 		},
 		{
-			name:    "update configmap with no data",
-			enabled: true,
+			name:          "update configmap with no data",
 			clientObjects: []runtime.Object{
 				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: nil},
 			},
@@ -127,8 +116,7 @@ func TestSyncConfigMap(t *testing.T) {
 			},
 		},
 		{
-			name:    "update configmap should ignore NotFound error [cache with unexpected date format, no live object]",
-			enabled: true,
+			name:          "update configmap should ignore NotFound error [cache with unexpected date format, no live object]",
 			existingConfigMap: &corev1.ConfigMap{
 				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
 				Data:       map[string]string{ConfigMapDataKey: "BAD_TIMESTAMP"},
@@ -137,49 +125,9 @@ func TestSyncConfigMap(t *testing.T) {
 				core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(dateFormat)}}),
 			},
 		},
-		{
-			name:            "delete configmap [no cache, no live object]",
-			expectedActions: []core.Action{},
-		},
-		{
-			name: "delete configmap [cache exists, live object exists]",
-			clientObjects: []runtime.Object{
-				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
-			},
-			existingConfigMap: &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
-				Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
-			},
-			expectedActions: []core.Action{
-				core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, ConfigMapName),
-			},
-		},
-		{
-			name: "delete configmap that's alrady being deleted [cache exists, live object exists]",
-			clientObjects: []runtime.Object{
-				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName}, Data: map[string]string{ConfigMapDataKey: now.Format(time.RFC3339)}},
-			},
-			existingConfigMap: &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName, DeletionTimestamp: &metav1.Time{Time: time.Now()}},
-				Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
-			},
-			expectedActions: []core.Action{},
-		},
-		{
-			name: "delete configmap should ignore NotFound error [cache exists, no live object]",
-			existingConfigMap: &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: ConfigMapName},
-				Data:       map[string]string{ConfigMapDataKey: now.Format(dateFormat)},
-			},
-			expectedActions: []core.Action{
-				core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, metav1.NamespaceSystem, ConfigMapName),
-			},
-		},
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyServiceAccountTokenTracking, test.enabled)()
-
 			client := fake.NewSimpleClientset(test.clientObjects...)
 			limiter := rate.NewLimiter(rate.Every(throttlePeriod), 1)
 			controller := newController(client, testingclock.NewFakeClock(now), limiter)
@@ -960,7 +960,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
 	LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
-	LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.Beta},
+	LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30
 
 	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
 
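Aside: a rough illustration of what the GA + LockToDefault change above means for callers; SomeTracking is a made-up gate name and this is not code from the PR. A gate locked to its default can no longer be turned off, which is why the per-call DefaultFeatureGate.Enabled checks are dropped elsewhere in this commit:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const SomeTracking featuregate.Feature = "SomeTracking"

func main() {
	fg := featuregate.NewFeatureGate()
	if err := fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		SomeTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}

	fmt.Println(fg.Enabled(SomeTracking)) // true

	// Attempting to disable a locked GA gate is rejected.
	if err := fg.Set("SomeTracking=false"); err != nil {
		fmt.Println("rejected:", err)
	}
}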
@@ -29,12 +29,10 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/apiserver/pkg/warning"
 	applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
 	typedv1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/klog/v2"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 
 func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) {
@@ -64,7 +62,7 @@ func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter, secretsWr
 	if lookup && getter == nil {
 		return nil, errors.New("ServiceAccountTokenGetter must be provided")
 	}
-	if lookup && secretsWriter == nil && utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyServiceAccountTokenTracking) {
+	if lookup && secretsWriter == nil {
 		return nil, errors.New("SecretsWriter must be provided")
 	}
 	return &legacyValidator{
@@ -146,25 +144,23 @@ func (v *legacyValidator) Validate(ctx context.Context, tokenData string, public
 		return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyServiceAccountTokenTracking) {
-		for _, ref := range serviceAccount.Secrets {
-			if ref.Name == secret.Name {
-				warning.AddWarning(ctx, "", "Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens.")
-				break
-			}
-		}
-		now := time.Now().UTC()
-		today := now.Format("2006-01-02")
-		tomorrow := now.AddDate(0, 0, 1).Format("2006-01-02")
-		lastUsed := secret.Labels[LastUsedLabelKey]
-		if lastUsed != today && lastUsed != tomorrow {
-			patchContent, err := json.Marshal(applyv1.Secret(secret.Name, secret.Namespace).WithLabels(map[string]string{LastUsedLabelKey: today}))
-			if err != nil {
-				klog.Errorf("Failed to marshal legacy service account token tracking labels, err: %v", err)
-			} else {
-				if _, err := v.secretsWriter.Secrets(namespace).Patch(ctx, secret.Name, types.MergePatchType, patchContent, metav1.PatchOptions{}); err != nil {
-					klog.Errorf("Failed to label legacy service account token secret with last-used, err: %v", err)
-				}
-			}
-		}
-	}
+	for _, ref := range serviceAccount.Secrets {
+		if ref.Name == secret.Name {
+			warning.AddWarning(ctx, "", "Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens.")
+			break
+		}
+	}
+	now := time.Now().UTC()
+	today := now.Format("2006-01-02")
+	tomorrow := now.AddDate(0, 0, 1).Format("2006-01-02")
+	lastUsed := secret.Labels[LastUsedLabelKey]
+	if lastUsed != today && lastUsed != tomorrow {
+		patchContent, err := json.Marshal(applyv1.Secret(secret.Name, secret.Namespace).WithLabels(map[string]string{LastUsedLabelKey: today}))
+		if err != nil {
+			klog.Errorf("Failed to marshal legacy service account token tracking labels, err: %v", err)
+		} else {
+			if _, err := v.secretsWriter.Secrets(namespace).Patch(ctx, secret.Name, types.MergePatchType, patchContent, metav1.PatchOptions{}); err != nil {
+				klog.Errorf("Failed to label legacy service account token secret with last-used, err: %v", err)
+			}
+		}
+	}
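Aside: a hedged sketch of the last-used labelling that Validate now performs unconditionally. The fake clientset, the secret name, and the example.com/last-used label key are invented for illustration; the real code patches the key named by the LastUsedLabelKey constant:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "legacy-token"},
	})

	today := time.Now().UTC().Format("2006-01-02")

	// Build a merge patch that only carries the label we want to set, so
	// concurrent writers to other fields of the Secret are left alone.
	patch, err := json.Marshal(applyv1.Secret("legacy-token", "default").
		WithLabels(map[string]string{"example.com/last-used": today}))
	if err != nil {
		panic(err)
	}

	s, err := client.CoreV1().Secrets("default").
		Patch(context.TODO(), "legacy-token", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Labels) // prints the label map with today's date
}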
@@ -34,18 +34,15 @@ import (
 	serviceaccountapiserver "k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
 	unionauthz "k8s.io/apiserver/pkg/authorization/union"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientinformers "k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/util/keyutil"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
 	"k8s.io/kubernetes/pkg/controller"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/controlplane"
 	"k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -232,7 +229,6 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenTracking, true)()
 	c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t)
 	defer stopFunc()
 	if err != nil {