From 22e540bc48d9bf698c4f381ccb56ed57dea0dae2 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Wed, 2 Nov 2022 01:08:42 -0400 Subject: [PATCH] kms: add wiring to support automatic encryption config reload This change adds a flag --encryption-provider-config-automatic-reload which will be used to drive automatic reloading of the encryption config at runtime. While this flag is set to true, or when KMS v2 plugins are used without KMS v1 plugins, the /healthz endpoints associated with said plugins are collapsed into a single endpoint at /healthz/kms-providers - in this state, it is not possible to configure exclusions for specific KMS providers while including the remaining ones - ex: using /readyz?exclude=kms-provider-1 to exclude a particular KMS is not possible. This single healthz check handles checking all configured KMS providers. When reloading is enabled but no KMS providers are configured, it is a no-op. k8s.io/apiserver does not support dynamic addition and removal of healthz checks at runtime. Reloading will instead have a single static healthz check and swap the underlying implementation at runtime when a config change occurs. Signed-off-by: Monis Khan --- cmd/kube-apiserver/app/testing/testserver.go | 1 + .../server/options/encryptionconfig/config.go | 104 +++++++++---- .../options/encryptionconfig/config_test.go | 29 +++- .../apiserver/pkg/server/options/etcd.go | 15 +- .../apiserver/pkg/server/options/etcd_test.go | 114 +++++++++++--- .../multiple-kms-providers-with-v2.yaml | 18 +++ .../multiple-kms-v2-providers.yaml | 20 +++ .../encryption-configs/no-kms-provider.yaml | 10 ++ .../transformation/all_transformation_test.go | 2 +- .../transformation/kms_transformation_test.go | 140 +++++++++++------- .../kmsv2_transformation_test.go | 45 ++++-- .../secrets_transformation_test.go | 4 +- ...ion_testcase.go => transformation_test.go} | 82 +++++++++- 13 files changed, 440 insertions(+), 144 deletions(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-providers-with-v2.yaml create mode 100644 staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-v2-providers.yaml create mode 100644 staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/no-kms-provider.yaml rename test/integration/controlplane/transformation/{transformation_testcase.go => transformation_test.go} (82%) diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go index 5a0952115d6..2e52f23b871 100644 --- a/cmd/kube-apiserver/app/testing/testserver.go +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -80,6 +80,7 @@ type TestServer struct { // Logger allows t.Testing and b.Testing to be passed to StartTestServer and StartTestServerOrDie type Logger interface { + Helper() Errorf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) Logf(format string, args ...interface{}) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index af750674200..c91d2f94d00 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -82,57 +82,91 @@ type kmsv2PluginProbe struct { l *sync.Mutex } +type kmsHealthChecker []healthz.HealthChecker + +func (k kmsHealthChecker) Name() string { + return "kms-providers" +} + +func (k 
kmsHealthChecker) Check(req *http.Request) error { + var errs []error + + for i := range k { + checker := k[i] + if err := checker.Check(req); err != nil { + errs = append(errs, fmt.Errorf("%s: %w", checker.Name(), err)) + } + } + + return utilerrors.Reduce(utilerrors.NewAggregate(errs)) +} + func (h *kmsPluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { return healthz.NamedCheck(fmt.Sprintf("kms-provider-%d", idx), func(r *http.Request) error { return h.check() }) } -func (p *kmsv2PluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { +func (h *kmsv2PluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { return healthz.NamedCheck(fmt.Sprintf("kms-provider-%d", idx), func(r *http.Request) error { - return p.check(r.Context()) + return h.check(r.Context()) }) } -func LoadEncryptionConfig(filepath string, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, error) { +// LoadEncryptionConfig parses and validates the encryption config specified by filepath. +// It may launch multiple go routines whose lifecycle is controlled by stopCh. +// If reload is true, or KMS v2 plugins are used with no KMS v1 plugins, the returned slice of health checkers will always be of length 1. +func LoadEncryptionConfig(filepath string, reload bool, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, error) { config, err := loadConfig(filepath) if err != nil { - return nil, nil, fmt.Errorf("error while parsing file: %v", err) + return nil, nil, fmt.Errorf("error while parsing file: %w", err) } - return getTransformerOverridesAndKMSPluginHealthzCheckers(config, stopCh) + transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(config, stopCh) + if err != nil { + return nil, nil, fmt.Errorf("error while building transformers: %w", err) + } + + if reload || (kmsUsed.v2Used && !kmsUsed.v1Used) { + kmsHealthChecks = []healthz.HealthChecker{kmsHealthChecker(kmsHealthChecks)} + } + + return transformers, kmsHealthChecks, nil } -func getTransformerOverridesAndKMSPluginHealthzCheckers(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, error) { +func getTransformerOverridesAndKMSPluginHealthzCheckers(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, *kmsState, error) { var kmsHealthChecks []healthz.HealthChecker - transformers, probes, err := getTransformerOverridesAndKMSPluginProbes(config, stopCh) + transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(config, stopCh) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for i := range probes { probe := probes[i] kmsHealthChecks = append(kmsHealthChecks, probe.toHealthzCheck(i)) } - return transformers, kmsHealthChecks, nil + return transformers, kmsHealthChecks, kmsUsed, nil } type healthChecker interface { toHealthzCheck(idx int) healthz.HealthChecker } -func getTransformerOverridesAndKMSPluginProbes(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthChecker, error) { +func getTransformerOverridesAndKMSPluginProbes(config *apiserverconfig.EncryptionConfiguration, stopCh <-chan struct{}) (map[schema.GroupResource]value.Transformer, []healthChecker, *kmsState, error) { resourceToPrefixTransformer := 
map[schema.GroupResource][]value.PrefixTransformer{} var probes []healthChecker + var kmsUsed kmsState // For each entry in the configuration for _, resourceConfig := range config.Resources { resourceConfig := resourceConfig - transformers, p, err := prefixTransformersAndProbes(resourceConfig, stopCh) + transformers, p, used, err := prefixTransformersAndProbes(resourceConfig, stopCh) if err != nil { - return nil, nil, err + return nil, nil, nil, err } + kmsUsed.v1Used = kmsUsed.v1Used || used.v1Used + kmsUsed.v2Used = kmsUsed.v2Used || used.v2Used // For each resource, create a list of providers to use for _, resource := range resourceConfig.Resources { @@ -152,7 +186,7 @@ func getTransformerOverridesAndKMSPluginProbes(config *apiserverconfig.Encryptio transformers[gr] = value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...) } - return transformers, probes, nil + return transformers, probes, &kmsUsed, nil } // check encrypts and decrypts test data against KMS-Plugin's gRPC endpoint. @@ -168,13 +202,13 @@ func (h *kmsPluginProbe) check() error { if err != nil { h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} h.ttl = kmsPluginHealthzNegativeTTL - return fmt.Errorf("failed to perform encrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + return fmt.Errorf("failed to perform encrypt section of the healthz check for KMS Provider %s, error: %w", h.name, err) } if _, err := h.service.Decrypt(p); err != nil { h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} h.ttl = kmsPluginHealthzNegativeTTL - return fmt.Errorf("failed to perform decrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + return fmt.Errorf("failed to perform decrypt section of the healthz check for KMS Provider %s, error: %w", h.name, err) } h.lastResponse = &kmsPluginHealthzResponse{err: nil, received: time.Now()} @@ -195,7 +229,7 @@ func (h *kmsv2PluginProbe) check(ctx context.Context) error { if err != nil { h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} h.ttl = kmsPluginHealthzNegativeTTL - return fmt.Errorf("failed to perform status section of the healthz check for KMS Provider %s, error: %v", h.name, err) + return fmt.Errorf("failed to perform status section of the healthz check for KMS Provider %s, error: %w", h.name, err) } if err := isKMSv2ProviderHealthy(h.name, p); err != nil { @@ -223,7 +257,7 @@ func isKMSv2ProviderHealthy(name string, response *envelopekmsv2.StatusResponse) } if err := utilerrors.Reduce(utilerrors.NewAggregate(errs)); err != nil { - return fmt.Errorf("kmsv2 Provider %s is not healthy, error: %v", name, err) + return fmt.Errorf("kmsv2 Provider %s is not healthy, error: %w", name, err) } return nil } @@ -231,13 +265,13 @@ func isKMSv2ProviderHealthy(name string, response *envelopekmsv2.StatusResponse) func loadConfig(filepath string) (*apiserverconfig.EncryptionConfiguration, error) { f, err := os.Open(filepath) if err != nil { - return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %w", filepath, err) } defer f.Close() data, err := io.ReadAll(f) if err != nil { - return nil, fmt.Errorf("could not read contents: %v", err) + return nil, fmt.Errorf("could not read contents: %w", err) } if len(data) == 0 { return nil, fmt.Errorf("encryption provider configuration file %q is empty", filepath) @@ -260,9 +294,10 @@ func 
loadConfig(filepath string) (*apiserverconfig.EncryptionConfiguration, erro return config, validation.ValidateEncryptionConfiguration(config).ToAggregate() } -func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, stopCh <-chan struct{}) ([]value.PrefixTransformer, []healthChecker, error) { +func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, stopCh <-chan struct{}) ([]value.PrefixTransformer, []healthChecker, *kmsState, error) { var transformers []value.PrefixTransformer var probes []healthChecker + var kmsUsed kmsState for _, provider := range config.Providers { provider := provider @@ -270,6 +305,7 @@ func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, s transformer value.PrefixTransformer transformerErr error probe healthChecker + used *kmsState ) switch { @@ -283,9 +319,11 @@ func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, s transformer, transformerErr = secretboxPrefixTransformer(provider.Secretbox) case provider.KMS != nil: - transformer, probe, transformerErr = kmsPrefixTransformer(provider.KMS, stopCh) + transformer, probe, used, transformerErr = kmsPrefixTransformer(provider.KMS, stopCh) if transformerErr == nil { probes = append(probes, probe) + kmsUsed.v1Used = kmsUsed.v1Used || used.v1Used + kmsUsed.v2Used = kmsUsed.v2Used || used.v2Used } case provider.Identity != nil: @@ -295,17 +333,17 @@ func prefixTransformersAndProbes(config apiserverconfig.ResourceConfiguration, s } default: - return nil, nil, errors.New("provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity") + return nil, nil, nil, errors.New("provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity") } if transformerErr != nil { - return nil, nil, transformerErr + return nil, nil, nil, transformerErr } transformers = append(transformers, transformer) } - return transformers, probes, nil + return transformers, probes, &kmsUsed, nil } type blockTransformerFunc func(cipher.Block) value.Transformer @@ -419,7 +457,11 @@ var ( EnvelopeKMSv2ServiceFactory = envelopekmsv2.NewGRPCService ) -func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-chan struct{}) (value.PrefixTransformer, healthChecker, error) { +type kmsState struct { + v1Used, v2Used bool +} + +func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-chan struct{}) (value.PrefixTransformer, healthChecker, *kmsState, error) { // we ignore the cancel func because this context should only be canceled when stopCh is closed ctx, _ := wait.ContextForChannel(stopCh) @@ -428,7 +470,7 @@ func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-cha case kmsAPIVersionV1: envelopeService, err := envelopeServiceFactory(ctx, config.Endpoint, config.Timeout.Duration) if err != nil { - return value.PrefixTransformer{}, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %v", kmsName, err) + return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %w", kmsName, err) } probe := &kmsPluginProbe{ @@ -441,16 +483,16 @@ func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-cha transformer := envelopePrefixTransformer(config, envelopeService, kmsTransformerPrefixV1) - return transformer, probe, nil + return transformer, probe, &kmsState{v1Used: true}, nil case kmsAPIVersionV2: if !utilfeature.DefaultFeatureGate.Enabled(features.KMSv2) { - 
return value.PrefixTransformer{}, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName) + return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName) } envelopeService, err := EnvelopeKMSv2ServiceFactory(ctx, config.Endpoint, config.Timeout.Duration) if err != nil { - return value.PrefixTransformer{}, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %v", kmsName, err) + return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err) } probe := &kmsv2PluginProbe{ @@ -467,10 +509,10 @@ func kmsPrefixTransformer(config *apiserverconfig.KMSConfiguration, stopCh <-cha Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"), } - return transformer, probe, nil + return transformer, probe, &kmsState{v2Used: true}, nil default: - return value.PrefixTransformer{}, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion) + return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go index 15c0e93cb95..4d1dfdb83dc 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go @@ -177,37 +177,37 @@ func TestEncryptionProviderConfigCorrect(t *testing.T) { // Transforms data using one of them, and tries to untransform using the others. // Repeats this for all possible combinations. 
correctConfigWithIdentityFirst := "testdata/valid-configs/identity-first.yaml" - identityFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithIdentityFirst, ctx.Done()) + identityFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithIdentityFirst, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithIdentityFirst) } correctConfigWithAesGcmFirst := "testdata/valid-configs/aes-gcm-first.yaml" - aesGcmFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithAesGcmFirst, ctx.Done()) + aesGcmFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithAesGcmFirst, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithAesGcmFirst) } correctConfigWithAesCbcFirst := "testdata/valid-configs/aes-cbc-first.yaml" - aesCbcFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithAesCbcFirst, ctx.Done()) + aesCbcFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithAesCbcFirst, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithAesCbcFirst) } correctConfigWithSecretboxFirst := "testdata/valid-configs/secret-box-first.yaml" - secretboxFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithSecretboxFirst, ctx.Done()) + secretboxFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithSecretboxFirst, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithSecretboxFirst) } correctConfigWithKMSFirst := "testdata/valid-configs/kms-first.yaml" - kmsFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithKMSFirst, ctx.Done()) + kmsFirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithKMSFirst, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithKMSFirst) } correctConfigWithKMSv2First := "testdata/valid-configs/kmsv2-first.yaml" - kmsv2FirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithKMSv2First, ctx.Done()) + kmsv2FirstTransformerOverrides, _, err := LoadEncryptionConfig(correctConfigWithKMSv2First, false, ctx.Done()) if err != nil { t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithKMSv2First) } @@ -264,6 +264,8 @@ func TestKMSPluginHealthz(t *testing.T) { config string want []healthChecker wantErr string + kmsv2 bool + kmsv1 bool }{ { desc: "Install Healthz", @@ -274,6 +276,7 @@ func TestKMSPluginHealthz(t *testing.T) { ttl: 3 * time.Second, }, }, + kmsv1: true, }, { desc: "Install multiple healthz", @@ -288,6 +291,7 @@ func TestKMSPluginHealthz(t *testing.T) { ttl: 3 * time.Second, }, }, + kmsv1: true, }, { desc: "No KMS Providers", @@ -306,6 +310,8 @@ func TestKMSPluginHealthz(t *testing.T) { ttl: 3 * time.Second, }, }, + kmsv2: true, + kmsv1: true, }, { desc: "Invalid API version", @@ -325,7 +331,7 @@ func TestKMSPluginHealthz(t *testing.T) { return } - _, got, err := getTransformerOverridesAndKMSPluginProbes(config, testContext(t).Done()) + _, got, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(config, testContext(t).Done()) if err != nil { t.Fatal(err) } @@ -347,6 +353,13 @@ func TestKMSPluginHealthz(t *testing.T) { } } + if tt.kmsv2 != 
kmsUsed.v2Used { + t.Errorf("incorrect kms v2 detection: want=%v got=%v", tt.kmsv2, kmsUsed.v2Used) + } + if tt.kmsv1 != kmsUsed.v1Used { + t.Errorf("incorrect kms v1 detection: want=%v got=%v", tt.kmsv1, kmsUsed.v1Used) + } + if d := cmp.Diff(tt.want, got, cmp.Comparer(func(a, b *kmsPluginProbe) bool { return *a == *b @@ -528,7 +541,7 @@ func getTransformerFromEncryptionConfig(t *testing.T, encryptionConfigPath strin ctx := testContext(t) t.Helper() - transformers, _, err := LoadEncryptionConfig(encryptionConfigPath, ctx.Done()) + transformers, _, err := LoadEncryptionConfig(encryptionConfigPath, false, ctx.Done()) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 6a0a5891410..60d5f36ae70 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -43,8 +43,9 @@ import ( type EtcdOptions struct { // The value of Paging on StorageConfig will be overridden by the // calculated feature gate value. - StorageConfig storagebackend.Config - EncryptionProviderConfigFilepath string + StorageConfig storagebackend.Config + EncryptionProviderConfigFilepath string + EncryptionProviderConfigAutomaticReload bool EtcdServersOverrides []string @@ -117,6 +118,10 @@ func (s *EtcdOptions) Validate() []error { } + if len(s.EncryptionProviderConfigFilepath) == 0 && s.EncryptionProviderConfigAutomaticReload { + allErrors = append(allErrors, fmt.Errorf("--encryption-provider-config-automatic-reload must be set with --encryption-provider-config")) + } + return allErrors } @@ -181,6 +186,10 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.BoolVar(&s.EncryptionProviderConfigAutomaticReload, "encryption-provider-config-automatic-reload", s.EncryptionProviderConfigAutomaticReload, + "Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. "+ + "Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints.") + fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, "The interval of compaction requests. 
If 0, the compaction request from apiserver is disabled.") @@ -213,7 +222,7 @@ func (s *EtcdOptions) Complete(storageObjectCountTracker flowcontrolrequest.Stor } if len(s.EncryptionProviderConfigFilepath) != 0 { - transformerOverrides, kmsPluginHealthzChecks, err := encryptionconfig.LoadEncryptionConfig(s.EncryptionProviderConfigFilepath, stopCh) + transformerOverrides, kmsPluginHealthzChecks, err := encryptionconfig.LoadEncryptionConfig(s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload, stopCh) if err != nil { return err } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go index 6fac3d53b7c..0527d3ba741 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go @@ -25,9 +25,13 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/storage/storagebackend" + utilfeature "k8s.io/apiserver/pkg/util/feature" + featuregatetesting "k8s.io/component-base/featuregate/testing" ) func TestEtcdOptionsValidate(t *testing.T) { @@ -108,6 +112,31 @@ func TestEtcdOptionsValidate(t *testing.T) { }, expectErr: "--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated", }, + { + name: "test when encryption-provider-config-automatic-reload is invalid", + testOptions: &EtcdOptions{ + StorageConfig: storagebackend.Config{ + Type: "etcd3", + Prefix: "/registry", + Transport: storagebackend.TransportConfig{ + ServerList: []string{"http://127.0.0.1"}, + KeyFile: "/var/run/kubernetes/etcd.key", + TrustedCAFile: "/var/run/kubernetes/etcdca.crt", + CertFile: "/var/run/kubernetes/etcdce.crt", + }, + CompactionInterval: storagebackend.DefaultCompactInterval, + CountMetricPollPeriod: time.Minute, + }, + EncryptionProviderConfigAutomaticReload: true, + DefaultStorageMediaType: "application/vnd.kubernetes.protobuf", + DeleteCollectionWorkers: 1, + EnableGarbageCollection: true, + EnableWatchCache: true, + DefaultWatchCacheSize: 100, + EtcdServersOverrides: []string{"/events#http://127.0.0.1:4002"}, + }, + expectErr: "--encryption-provider-config-automatic-reload must be set with --encryption-provider-config", + }, { name: "test when EtcdOptions is valid", testOptions: &EtcdOptions{ @@ -200,12 +229,26 @@ func TestParseWatchCacheSizes(t *testing.T) { } func TestKMSHealthzEndpoint(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, true)() + testCases := []struct { name string encryptionConfigPath string wantChecks []string skipHealth bool + reload bool }{ + { + name: "no kms-provider, expect no kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/no-kms-provider.yaml", + wantChecks: []string{"etcd"}, + }, + { + name: "no kms-provider+reload, expect single kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/no-kms-provider.yaml", + reload: true, + wantChecks: []string{"etcd", "kms-providers"}, + }, { name: "single kms-provider, expect single kms healthz check", encryptionConfigPath: "testdata/encryption-configs/single-kms-provider.yaml", @@ -216,6 +259,34 @@ func TestKMSHealthzEndpoint(t *testing.T) { 
encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", wantChecks: []string{"etcd", "kms-provider-0", "kms-provider-1"}, }, + { + name: "two kms-providers+reload, expect single kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", + reload: true, + wantChecks: []string{"etcd", "kms-providers"}, + }, + { + name: "kms v1+v2, expect three kms healthz checks", + encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers-with-v2.yaml", + wantChecks: []string{"etcd", "kms-provider-0", "kms-provider-1", "kms-provider-2"}, + }, + { + name: "kms v1+v2+reload, expect single kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers-with-v2.yaml", + reload: true, + wantChecks: []string{"etcd", "kms-providers"}, + }, + { + name: "multiple kms v2, expect single kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/multiple-kms-v2-providers.yaml", + wantChecks: []string{"etcd", "kms-providers"}, + }, + { + name: "multiple kms v2+reload, expect single kms healthz check", + encryptionConfigPath: "testdata/encryption-configs/multiple-kms-v2-providers.yaml", + reload: true, + wantChecks: []string{"etcd", "kms-providers"}, + }, { name: "two kms-providers with skip, expect zero kms healthz checks", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", @@ -231,8 +302,9 @@ func TestKMSHealthzEndpoint(t *testing.T) { t.Run(tc.name, func(t *testing.T) { serverConfig := server.NewConfig(codecs) etcdOptions := &EtcdOptions{ - EncryptionProviderConfigFilepath: tc.encryptionConfigPath, - SkipHealthEndpoints: tc.skipHealth, + EncryptionProviderConfigFilepath: tc.encryptionConfigPath, + EncryptionProviderConfigAutomaticReload: tc.reload, + SkipHealthEndpoints: tc.skipHealth, } if err := etcdOptions.Complete(serverConfig.StorageObjectCountTracker, serverConfig.DrainedNotify()); err != nil { t.Fatal(err) @@ -241,11 +313,7 @@ func TestKMSHealthzEndpoint(t *testing.T) { t.Fatalf("Failed to add healthz error: %v", err) } - for _, n := range tc.wantChecks { - if !hasCheck(n, serverConfig.HealthzChecks) { - t.Errorf("Missing HealthzChecker %s", n) - } - } + healthChecksAreEqual(t, tc.wantChecks, serverConfig.HealthzChecks) }) } } @@ -284,25 +352,25 @@ func TestReadinessCheck(t *testing.T) { t.Fatalf("Failed to add healthz error: %v", err) } - for _, n := range tc.wantReadyzChecks { - if !hasCheck(n, serverConfig.ReadyzChecks) { - t.Errorf("Missing ReadyzChecker %s", n) - } - } - for _, n := range tc.wantHealthzChecks { - if !hasCheck(n, serverConfig.HealthzChecks) { - t.Errorf("Missing HealthzChecker %s", n) - } - } + healthChecksAreEqual(t, tc.wantReadyzChecks, serverConfig.ReadyzChecks) + healthChecksAreEqual(t, tc.wantHealthzChecks, serverConfig.HealthzChecks) }) } } -func hasCheck(want string, healthchecks []healthz.HealthChecker) bool { - for _, h := range healthchecks { - if want == h.Name() { - return true - } +func healthChecksAreEqual(t *testing.T, want []string, healthChecks []healthz.HealthChecker) { + t.Helper() + + wantSet := sets.NewString(want...) 
+ gotSet := sets.NewString() + + for _, h := range healthChecks { + gotSet.Insert(h.Name()) + } + + gotSet.Delete("log", "ping") // not relevant for our tests + + if !wantSet.Equal(gotSet) { + t.Errorf("healthz checks are not equal, missing=%q, extra=%q", wantSet.Difference(gotSet).List(), gotSet.Difference(wantSet).List()) } - return false } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-providers-with-v2.yaml b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-providers-with-v2.yaml new file mode 100644 index 00000000000..6003b60e14a --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-providers-with-v2.yaml @@ -0,0 +1,18 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - kms: + name: kms-provider-1 + cachesize: 1000 + endpoint: unix:///@provider1.sock + - kms: + name: kms-provider-2 + cachesize: 1000 + endpoint: unix:///@provider2.sock + - kms: + apiVersion: v2 + name: kms-provider-3 + endpoint: unix:///@provider2.sock diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-v2-providers.yaml b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-v2-providers.yaml new file mode 100644 index 00000000000..4ea8b01299f --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/multiple-kms-v2-providers.yaml @@ -0,0 +1,20 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - kms: + apiVersion: v2 + name: kms-provider-1 + cachesize: 1000 + endpoint: unix:///@provider1.sock + - kms: + apiVersion: v2 + name: kms-provider-2 + cachesize: 1000 + endpoint: unix:///@provider2.sock + - kms: + apiVersion: v2 + name: kms-provider-3 + endpoint: unix:///@provider2.sock diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/no-kms-provider.yaml b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/no-kms-provider.yaml new file mode 100644 index 00000000000..37e87109487 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/testdata/encryption-configs/no-kms-provider.yaml @@ -0,0 +1,10 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== diff --git a/test/integration/controlplane/transformation/all_transformation_test.go b/test/integration/controlplane/transformation/all_transformation_test.go index bf6775df6b0..e5a4d834251 100644 --- a/test/integration/controlplane/transformation/all_transformation_test.go +++ b/test/integration/controlplane/transformation/all_transformation_test.go @@ -95,7 +95,7 @@ resources: secret: c2VjcmV0IGlzIHNlY3VyZQ== ` - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("failed to start Kube API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err) } diff --git a/test/integration/controlplane/transformation/kms_transformation_test.go b/test/integration/controlplane/transformation/kms_transformation_test.go index c0b5342001d..7a2767327f5 100644 --- a/test/integration/controlplane/transformation/kms_transformation_test.go +++ 
b/test/integration/controlplane/transformation/kms_transformation_test.go @@ -26,19 +26,16 @@ import ( "encoding/base64" "encoding/binary" "fmt" - "net/http" "strings" "testing" "time" "golang.org/x/crypto/cryptobyte" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/value" aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" mock "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/testing/v1beta1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" kmsapi "k8s.io/kms/apis/v1beta1" ) @@ -131,7 +128,7 @@ resources: } defer pluginMock.CleanUp() - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err) } @@ -320,7 +317,7 @@ resources: t.Fatalf("Failed to start KMS Plugin #2: err: %v", err) } - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("Failed to start kube-apiserver, error: %v", err) } @@ -331,66 +328,103 @@ resources: // Stage 1 - Since all kms-plugins are guaranteed to be up, healthz checks for: // healthz/kms-provider-0 and /healthz/kms-provider-1 should be OK. - mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig) + mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig) // Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the health check for provider-1 // to fail, but provider-2 should still be OK pluginMock1.EnterFailedState() - mustBeUnHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + mustBeUnHealthy(t, "/kms-provider-0", + "internal server error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) + mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig) pluginMock1.ExitFailedState() // Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1 // to succeed now, but provider-2 is now down. - // Need to sleep since health check chases responses for 3 seconds. pluginMock2.EnterFailedState() - mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeUnHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig) + mustBeUnHealthy(t, "/kms-provider-1", + "internal server error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) + pluginMock2.ExitFailedState() + + // Stage 4 - All kms-plugins are once again up, + // the healthz check should be OK. 
+ mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig) + mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig) } -func mustBeHealthy(t *testing.T, checkName string, clientConfig *rest.Config) { - t.Helper() - var restErr error - pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - status, err := getHealthz(checkName, clientConfig) - restErr = err - if err != nil { - return false, err - } - return status == http.StatusOK, nil - }) +func TestKMSHealthzWithReload(t *testing.T) { + encryptionConfig := ` +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - kms: + name: provider-1 + endpoint: unix:///@kms-provider-1.sock + - kms: + name: provider-2 + endpoint: unix:///@kms-provider-2.sock +` - if pollErr == wait.ErrWaitTimeout { - t.Fatalf("failed to get the expected healthz status of OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr) - } -} - -func mustBeUnHealthy(t *testing.T, checkName string, clientConfig *rest.Config) { - t.Helper() - var restErr error - pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - status, err := getHealthz(checkName, clientConfig) - restErr = err - if err != nil { - return false, err - } - return status != http.StatusOK, nil - }) - - if pollErr == wait.ErrWaitTimeout { - t.Fatalf("failed to get the expected healthz status of !OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr) - } -} - -func getHealthz(checkName string, clientConfig *rest.Config) (int, error) { - client, err := kubernetes.NewForConfig(clientConfig) + pluginMock1, err := mock.NewBase64Plugin("@kms-provider-1.sock") if err != nil { - return 0, fmt.Errorf("failed to create a client: %v", err) + t.Fatalf("failed to create mock of KMS Plugin #1: %v", err) } - result := client.CoreV1().RESTClient().Get().AbsPath(fmt.Sprintf("/healthz/%v", checkName)).Do(context.TODO()) - status := 0 - result.StatusCode(&status) - return status, nil + if err := pluginMock1.Start(); err != nil { + t.Fatalf("Failed to start kms-plugin, err: %v", err) + } + defer pluginMock1.CleanUp() + if err := mock.WaitForBase64PluginToBeUp(pluginMock1); err != nil { + t.Fatalf("Failed to start plugin #1, err: %v", err) + } + + pluginMock2, err := mock.NewBase64Plugin("@kms-provider-2.sock") + if err != nil { + t.Fatalf("Failed to create mock of KMS Plugin #2: err: %v", err) + } + if err := pluginMock2.Start(); err != nil { + t.Fatalf("Failed to start kms-plugin, err: %v", err) + } + defer pluginMock2.CleanUp() + if err := mock.WaitForBase64PluginToBeUp(pluginMock2); err != nil { + t.Fatalf("Failed to start KMS Plugin #2: err: %v", err) + } + + test, err := newTransformTest(t, encryptionConfig, true) + if err != nil { + t.Fatalf("Failed to start kube-apiserver, error: %v", err) + } + defer test.cleanUp() + + // Name of the healthz check is always "kms-provider-0" and it covers all kms plugins. + + // Stage 1 - Since all kms-plugins are guaranteed to be up, + // the healthz check should be OK. + mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig) + + // Stage 2 - kms-plugin for provider-1 is down. 
Therefore, expect the healthz check + // to fail and report that provider-1 is down + pluginMock1.EnterFailedState() + mustBeUnHealthy(t, "/kms-providers", + "internal server error: kms-provider-0: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) + pluginMock1.ExitFailedState() + + // Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1 + // to succeed now, but provider-2 is now down. + pluginMock2.EnterFailedState() + mustBeUnHealthy(t, "/kms-providers", + "internal server error: kms-provider-1: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) + pluginMock2.ExitFailedState() + + // Stage 4 - All kms-plugins are once again up, + // the healthz check should be OK. + mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig) } diff --git a/test/integration/controlplane/transformation/kmsv2_transformation_test.go b/test/integration/controlplane/transformation/kmsv2_transformation_test.go index 1865dbd2cb7..6ce20fd0702 100644 --- a/test/integration/controlplane/transformation/kmsv2_transformation_test.go +++ b/test/integration/controlplane/transformation/kmsv2_transformation_test.go @@ -140,7 +140,7 @@ resources: } defer pluginMock.CleanUp() - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err) } @@ -253,33 +253,46 @@ resources: t.Fatalf("Failed to start KMS Plugin #2: err: %v", err) } - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("Failed to start kube-apiserver, error: %v", err) } defer test.cleanUp() - // Name of the healthz check is calculated based on a constant "kms-provider-" + position of the - // provider in the config. + // Name of the healthz check is always "kms-provider-0" and it covers all kms plugins. - // Stage 1 - Since all kms-plugins are guaranteed to be up, healthz checks for: - // healthz/kms-provider-0 and /healthz/kms-provider-1 should be OK. - mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + // Stage 1 - Since all kms-plugins are guaranteed to be up, + // the healthz check should be OK. + mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig) - // Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the health check for provider-1 - // to fail, but provider-2 should still be OK + // Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the healthz check + // to fail and report that provider-1 is down pluginMock1.EnterFailedState() - mustBeUnHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + mustBeUnHealthy(t, "/kms-providers", + "internal server error: kms-provider-0: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) pluginMock1.ExitFailedState() // Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1 // to succeed now, but provider-2 is now down. - // Need to sleep since health check chases responses for 3 seconds. 
pluginMock2.EnterFailedState() - mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig) - mustBeUnHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig) + mustBeUnHealthy(t, "/kms-providers", + "internal server error: kms-provider-1: rpc error: code = FailedPrecondition desc = failed precondition - key disabled", + test.kubeAPIServer.ClientConfig) + pluginMock2.ExitFailedState() + + // Stage 4 - All kms-plugins are once again up, + // the healthz check should be OK. + mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig) + + // Stage 5 - All kms-plugins are unhealthy at the same time and we can observe both failures. + pluginMock1.EnterFailedState() + pluginMock2.EnterFailedState() + mustBeUnHealthy(t, "/kms-providers", + "internal server error: "+ + "[kms-provider-0: failed to perform status section of the healthz check for KMS Provider provider-1, error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled,"+ + " kms-provider-1: failed to perform status section of the healthz check for KMS Provider provider-2, error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled]", + test.kubeAPIServer.ClientConfig) } func TestKMSv2SingleService(t *testing.T) { @@ -328,7 +341,7 @@ resources: } t.Cleanup(pluginMock.CleanUp) - test, err := newTransformTest(t, encryptionConfig) + test, err := newTransformTest(t, encryptionConfig, false) if err != nil { t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err) } diff --git a/test/integration/controlplane/transformation/secrets_transformation_test.go b/test/integration/controlplane/transformation/secrets_transformation_test.go index ccbd86ef0ff..58f29d7ba19 100644 --- a/test/integration/controlplane/transformation/secrets_transformation_test.go +++ b/test/integration/controlplane/transformation/secrets_transformation_test.go @@ -85,7 +85,7 @@ func TestSecretsShouldBeTransformed(t *testing.T) { // TODO: add secretbox } for _, tt := range testCases { - test, err := newTransformTest(t, tt.transformerConfigContent) + test, err := newTransformTest(t, tt.transformerConfigContent, false) if err != nil { test.cleanUp() t.Errorf("failed to setup test for envelop %s, error was %v", tt.transformerPrefix, err) @@ -120,7 +120,7 @@ func BenchmarkAESCBCEnvelopeWrite(b *testing.B) { func runBenchmark(b *testing.B, transformerConfig string) { b.StopTimer() - test, err := newTransformTest(b, transformerConfig) + test, err := newTransformTest(b, transformerConfig, false) defer test.cleanUp() if err != nil { b.Fatalf("failed to setup benchmark for config %s, error was %v", transformerConfig, err) diff --git a/test/integration/controlplane/transformation/transformation_testcase.go b/test/integration/controlplane/transformation/transformation_test.go similarity index 82% rename from test/integration/controlplane/transformation/transformation_testcase.go rename to test/integration/controlplane/transformation/transformation_test.go index 14da3360eba..c949f503790 100644 --- a/test/integration/controlplane/transformation/transformation_testcase.go +++ b/test/integration/controlplane/transformation/transformation_test.go @@ -26,26 +26,28 @@ import ( "strconv" "strings" "testing" - - "k8s.io/klog/v2" + "time" clientv3 "go.etcd.io/etcd/client/v3" - "k8s.io/component-base/metrics/legacyregistry" - "sigs.k8s.io/yaml" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/value" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/etcd" "k8s.io/kubernetes/test/integration/framework" + "sigs.k8s.io/yaml" ) const ( @@ -78,7 +80,7 @@ type transformTest struct { secret *corev1.Secret } -func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string) (*transformTest, error) { +func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string, reload bool) (*transformTest, error) { e := transformTest{ logger: l, transformerConfig: transformerConfigYAML, @@ -92,7 +94,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin } } - if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(), e.storageConfig); err != nil { + if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(reload), e.storageConfig); err != nil { return nil, fmt.Errorf("failed to start KubeAPI server: %v", err) } klog.Infof("Started kube-apiserver %v", e.kubeAPIServer.ClientConfig.Host) @@ -105,6 +107,15 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin return nil, err } + if transformerConfigYAML != "" && reload { + // when reloading is enabled, this healthz endpoint is always present + mustBeHealthy(l, "/kms-providers", "ok", e.kubeAPIServer.ClientConfig) + + // excluding healthz endpoints even if they do not exist should work + mustBeHealthy(l, "", `warn: some health checks cannot be excluded: no matches for "kms-provider-0","kms-provider-1","kms-provider-2","kms-provider-3"`, + e.kubeAPIServer.ClientConfig, "kms-provider-0", "kms-provider-1", "kms-provider-2", "kms-provider-3") + } + return &e, nil } @@ -228,10 +239,11 @@ func (e *transformTest) getRawSecretFromETCD() ([]byte, error) { return etcdResponse.Kvs[0].Value, nil } -func (e *transformTest) getEncryptionOptions() []string { +func (e *transformTest) getEncryptionOptions(reload bool) []string { if e.transformerConfig != "" { return []string{ "--encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName), + fmt.Sprintf("--encryption-provider-config-automatic-reload=%v", reload), "--disable-admission-plugins", "ServiceAccount"} } @@ -401,3 +413,59 @@ func (e *transformTest) printMetrics() error { return nil } + +func mustBeHealthy(t kubeapiservertesting.Logger, checkName, wantBodyContains string, clientConfig *rest.Config, excludes ...string) { + t.Helper() + var restErr error + pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { + body, ok, err := getHealthz(checkName, clientConfig, excludes...) 
+ restErr = err + if err != nil { + return false, err + } + done := ok && strings.Contains(body, wantBodyContains) + if !done { + t.Logf("expected server check %q to be healthy with message %q but it is not: %s", checkName, wantBodyContains, body) + } + return done, nil + }) + + if pollErr == wait.ErrWaitTimeout { + t.Fatalf("failed to get the expected healthz status of OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr) + } +} + +func mustBeUnHealthy(t kubeapiservertesting.Logger, checkName, wantBodyContains string, clientConfig *rest.Config, excludes ...string) { + t.Helper() + var restErr error + pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { + body, ok, err := getHealthz(checkName, clientConfig, excludes...) + restErr = err + if err != nil { + return false, err + } + done := !ok && strings.Contains(body, wantBodyContains) + if !done { + t.Logf("expected server check %q to be unhealthy with message %q but it is not: %s", checkName, wantBodyContains, body) + } + return done, nil + }) + + if pollErr == wait.ErrWaitTimeout { + t.Fatalf("failed to get the expected healthz status of !OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr) + } +} + +func getHealthz(checkName string, clientConfig *rest.Config, excludes ...string) (string, bool, error) { + client, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return "", false, fmt.Errorf("failed to create a client: %v", err) + } + + req := client.CoreV1().RESTClient().Get().AbsPath(fmt.Sprintf("/healthz%v", checkName)).Param("verbose", "true") + for _, exclude := range excludes { + req.Param("exclude", exclude) + } + body, err := req.DoRaw(context.TODO()) // we can still have a response body during an error case + return string(body), err == nil, nil +}
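
The commit message above describes, but this patch does not yet wire up, the planned reload mechanism: because k8s.io/apiserver cannot add or remove healthz checks at runtime, a single check stays registered under the fixed name "kms-providers" and only its underlying implementation is swapped when the encryption config changes on disk. Below is a minimal sketch of that pattern, not code from this patch; the names swappableKMSChecker, checkFunc, and swap are hypothetical and do not exist in the tree, and the real aggregation of per-provider probes is what kmsHealthChecker in config.go already provides.

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
)

// checkFunc mirrors the signature of healthz.HealthChecker.Check.
type checkFunc func(req *http.Request) error

// swappableKMSChecker is registered once under the fixed name "kms-providers"
// and is never removed; a config reload only replaces its delegate.
type swappableKMSChecker struct {
	delegate atomic.Value // stores a checkFunc
}

func (s *swappableKMSChecker) Name() string { return "kms-providers" }

func (s *swappableKMSChecker) Check(req *http.Request) error {
	if f, ok := s.delegate.Load().(checkFunc); ok && f != nil {
		return f(req)
	}
	// No KMS providers configured (or no config loaded yet): the check is a no-op,
	// matching the behavior described in the commit message.
	return nil
}

// swap installs the checker built from a freshly loaded encryption config,
// e.g. the Check method of a kmsHealthChecker aggregate.
func (s *swappableKMSChecker) swap(f checkFunc) { s.delegate.Store(f) }

func main() {
	c := &swappableKMSChecker{}
	c.swap(func(*http.Request) error { return nil })
	fmt.Println(c.Name(), c.Check(nil))
}

The actual file watching and rebuilding of transformers and probes is left to later changes; this patch only adds the --encryption-provider-config-automatic-reload flag and collapses the per-provider healthz checks so that a single stable check name exists to swap behind.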