From 543f29be4eafc43c6d8aeda389cf684922a14306 Mon Sep 17 00:00:00 2001
From: "Rostislav M. Georgiev"
Date: Thu, 28 Nov 2019 14:47:53 +0200
Subject: [PATCH 1/2] kubeadm: Reduce kubelet.DownloadConfig usage

kubelet.DownloadConfig is an old utility function which takes a client set
and a kubelet version, uses them to fetch the kubelet component config from
a config map, and places it in a local file. This function is simple to use,
but it is dangerous and unnecessary.

Practically, in all cases the kubelet configuration is present locally and
does not need to be fetched from a config map on the cluster (it just needs
to be stored in a file). Furthermore, kubelet.DownloadConfig does not use
the kubeadm component configs module in any way. Hence, a kubelet
configuration fetched using it may not be patched, validated, or otherwise
processed in any way by kubeadm other than piping it to a file.

This patch replaces all but a single kubelet.DownloadConfig invocation with
equivalents that get the local copy of the kubelet component config and just
store it in a file. The sole remaining invocation covers the
`kubeadm upgrade node --kubelet-version` case.

In addition to that, a possible panic is fixed in kubelet.DownloadConfig and
it now takes the kubelet version parameter as a string.

Signed-off-by: Rostislav M. Georgiev
---
 cmd/kubeadm/app/cmd/phases/init/BUILD         |  1 -
 cmd/kubeadm/app/cmd/phases/init/kubelet.go    |  8 +----
 cmd/kubeadm/app/cmd/phases/join/BUILD         |  1 -
 cmd/kubeadm/app/cmd/phases/join/kubelet.go    |  8 +----
 cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD |  1 -
 .../cmd/phases/upgrade/node/kubeletconfig.go  | 32 ++++++++-----------
 cmd/kubeadm/app/cmd/upgrade/apply.go          |  2 +-
 cmd/kubeadm/app/phases/kubelet/BUILD          |  1 -
 cmd/kubeadm/app/phases/kubelet/config.go      | 32 ++++++++++++-------
 cmd/kubeadm/app/phases/upgrade/postupgrade.go | 16 +++-------
 10 files changed, 42 insertions(+), 60 deletions(-)

diff --git a/cmd/kubeadm/app/cmd/phases/init/BUILD b/cmd/kubeadm/app/cmd/phases/init/BUILD
index 98fcbdf23a1..d79f0cecef7 100644
--- a/cmd/kubeadm/app/cmd/phases/init/BUILD
+++ b/cmd/kubeadm/app/cmd/phases/init/BUILD
@@ -27,7 +27,6 @@ go_library(
         "//cmd/kubeadm/app/cmd/options:go_default_library",
         "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library",
         "//cmd/kubeadm/app/cmd/util:go_default_library",
-        "//cmd/kubeadm/app/componentconfigs:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/phases/addons/dns:go_default_library",
         "//cmd/kubeadm/app/phases/addons/proxy:go_default_library",
diff --git a/cmd/kubeadm/app/cmd/phases/init/kubelet.go b/cmd/kubeadm/app/cmd/phases/init/kubelet.go
index 311384f4ab0..0db4fa16fe5 100644
--- a/cmd/kubeadm/app/cmd/phases/init/kubelet.go
+++ b/cmd/kubeadm/app/cmd/phases/init/kubelet.go
@@ -24,7 +24,6 @@ import (
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
 	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
-	"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
 	kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet"
 )

@@ -72,13 +71,8 @@ func runKubeletStart(c workflow.RunData) error {
 		return errors.Wrap(err, "error writing a dynamic environment file for the kubelet")
 	}

-	kubeletCfg, ok := data.Cfg().ComponentConfigs[componentconfigs.KubeletGroup]
-	if !ok {
-		return errors.New("no kubelet component config found in the active component config set")
-	}
-
 	// Write the kubelet configuration file to disk.
- if err := kubeletphase.WriteConfigToDisk(kubeletCfg, data.KubeletDir()); err != nil { + if err := kubeletphase.WriteConfigToDisk(&data.Cfg().ClusterConfiguration, data.KubeletDir()); err != nil { return errors.Wrap(err, "error writing kubelet configuration to disk") } diff --git a/cmd/kubeadm/app/cmd/phases/join/BUILD b/cmd/kubeadm/app/cmd/phases/join/BUILD index 80000374edc..c4492d4cce3 100644 --- a/cmd/kubeadm/app/cmd/phases/join/BUILD +++ b/cmd/kubeadm/app/cmd/phases/join/BUILD @@ -34,7 +34,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/join/kubelet.go b/cmd/kubeadm/app/cmd/phases/join/kubelet.go index a119f10e460..4abb2d6ba4e 100644 --- a/cmd/kubeadm/app/cmd/phases/join/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/join/kubelet.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" @@ -122,11 +121,6 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { } } - kubeletVersion, err := version.ParseSemantic(initCfg.ClusterConfiguration.KubernetesVersion) - if err != nil { - return err - } - bootstrapClient, err := kubeconfigutil.ClientSetFromFile(bootstrapKubeConfigFile) if err != nil { return errors.Errorf("couldn't create client from kubeconfig file %q", bootstrapKubeConfigFile) @@ -160,7 +154,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { kubeletphase.TryStopKubelet() // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start - if err := kubeletphase.DownloadConfig(bootstrapClient, kubeletVersion, kubeadmconstants.KubeletRunDirectory); err != nil { + if err := kubeletphase.WriteConfigToDisk(&initCfg.ClusterConfiguration, kubeadmconstants.KubeletRunDirectory); err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD b/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD index f5d74dcac4a..2e1080798cc 100644 --- a/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD @@ -22,7 +22,6 @@ go_library( "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/dryrun:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go b/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go index 9d147e17719..e6a50d7d8c8 100644 --- a/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" - 
"k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" @@ -66,7 +65,6 @@ func runKubeletConfigPhase() func(c workflow.RunData) error { // otherwise, retrieve all the info required for kubelet config upgrade cfg := data.Cfg() - client := data.Client() dryRun := data.DryRun() // Set up the kubelet directory to use. If dry-running, this will return a fake directory @@ -75,24 +73,20 @@ func runKubeletConfigPhase() func(c workflow.RunData) error { return err } - // Gets the target kubelet version. - // by default kubelet version is expected to be equal to ClusterConfiguration.KubernetesVersion, but - // users can specify a different kubelet version (this is a legacy of the original implementation - // of `kubeam upgrade node config` which we are preserving in order to don't break GA contract) - kubeletVersionStr := cfg.ClusterConfiguration.KubernetesVersion - if data.KubeletVersion() != "" && data.KubeletVersion() != kubeletVersionStr { - kubeletVersionStr = data.KubeletVersion() - fmt.Printf("[upgrade] Using kubelet config version %s, while kubernetes-version is %s\n", kubeletVersionStr, cfg.ClusterConfiguration.KubernetesVersion) - } - - // Parse the desired kubelet version - kubeletVersion, err := version.ParseSemantic(kubeletVersionStr) - if err != nil { - return err - } - // TODO: Checkpoint the current configuration first so that if something goes wrong it can be recovered - if err := kubeletphase.DownloadConfig(client, kubeletVersion, kubeletDir); err != nil { + + // Store the kubelet component configuration. + // By default the kubelet version is expected to be equal to cfg.ClusterConfiguration.KubernetesVersion, but + // users can specify a different kubelet version (this is a legacy of the original implementation + // of `kubeadm upgrade node config` which we are preserving in order to not break the GA contract) + if data.KubeletVersion() != "" && data.KubeletVersion() != cfg.ClusterConfiguration.KubernetesVersion { + fmt.Printf("[upgrade] Using kubelet config version %s, while kubernetes-version is %s\n", data.KubeletVersion(), cfg.ClusterConfiguration.KubernetesVersion) + if err := kubeletphase.DownloadConfig(data.Client(), data.KubeletVersion(), kubeletDir); err != nil { + return err + } + + // WriteConfigToDisk is what we should be calling since we already have the correct component config loaded + } else if err = kubeletphase.WriteConfigToDisk(&cfg.ClusterConfiguration, kubeletDir); err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 8b59e6a2c59..9ca4f5c515e 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -170,7 +170,7 @@ func runApply(flags *applyFlags, userVersion string) error { // Upgrade RBAC rules and addons. 
klog.V(1).Infoln("[upgrade/postupgrade] upgrading RBAC rules and addons") - if err := upgrade.PerformPostUpgradeTasks(client, cfg, newK8sVersion, flags.dryRun); err != nil { + if err := upgrade.PerformPostUpgradeTasks(client, cfg, flags.dryRun); err != nil { return errors.Wrap(err, "[upgrade/postupgrade] FATAL post-upgrade error") } diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index 276b76dbf17..4f9a0050655 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -20,7 +20,6 @@ go_library( "//cmd/kubeadm/app/util/initsystem:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubelet/config.go b/cmd/kubeadm/app/phases/kubelet/config.go index 6e873528101..1aeb39b6ebd 100644 --- a/cmd/kubeadm/app/phases/kubelet/config.go +++ b/cmd/kubeadm/app/phases/kubelet/config.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" @@ -38,12 +37,17 @@ import ( // WriteConfigToDisk writes the kubelet config object down to a file // Used at "kubeadm init" and "kubeadm upgrade" time -func WriteConfigToDisk(kubeletCfg kubeadmapi.ComponentConfig, kubeletDir string) error { +func WriteConfigToDisk(cfg *kubeadmapi.ClusterConfiguration, kubeletDir string) error { + kubeletCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeletGroup] + if !ok { + return errors.New("no kubelet component config found in the active component config set") + } kubeletBytes, err := kubeletCfg.Marshal() if err != nil { return err } + return writeConfigBytesToDisk(kubeletBytes, kubeletDir) } @@ -130,8 +134,13 @@ func createConfigMapRBACRules(client clientset.Interface, k8sVersion *version.Ve } // DownloadConfig downloads the kubelet configuration from a ConfigMap and writes it to disk. -// Used at "kubeadm join" time -func DownloadConfig(client clientset.Interface, kubeletVersion *version.Version, kubeletDir string) error { +// DEPRECATED: Do not use in new code! 
+func DownloadConfig(client clientset.Interface, kubeletVersionStr string, kubeletDir string) error { + // Parse the desired kubelet version + kubeletVersion, err := version.ParseSemantic(kubeletVersionStr) + if err != nil { + return err + } // Download the ConfigMap from the cluster based on what version the kubelet is configMapName := kubeadmconstants.GetKubeletConfigMapName(kubeletVersion) @@ -139,17 +148,18 @@ func DownloadConfig(client clientset.Interface, kubeletVersion *version.Version, fmt.Printf("[kubelet-start] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n", configMapName, metav1.NamespaceSystem) - kubeletCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName) - // If the ConfigMap wasn't found and the kubelet version is v1.10.x, where we didn't support the config file yet - // just return, don't error out - if apierrors.IsNotFound(err) && kubeletVersion.Minor() == 10 { - return nil - } + kubeletCfgMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName) if err != nil { return err } - return writeConfigBytesToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey]), kubeletDir) + // Check for the key existence, otherwise we'll panic here + kubeletCfg, ok := kubeletCfgMap.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey] + if !ok { + return errors.Errorf("no key %q found in config map %s", kubeadmconstants.KubeletBaseConfigurationConfigMapKey, configMapName) + } + + return writeConfigBytesToDisk([]byte(kubeletCfg), kubeletDir) } // configMapRBACName returns the name for the Role/RoleBinding for the kubelet config configmap for the right branch of k8s diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 9daece5e00d..886a9da1fca 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" errorsutil "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -44,7 +43,7 @@ import ( // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do // Note that the mark-control-plane phase is left out, not needed, and no token is created as that doesn't belong to the upgrade -func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, newK8sVer *version.Version, dryRun bool) error { +func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, dryRun bool) error { errs := []error{} // Upload currently used configuration to the cluster @@ -60,7 +59,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon } // Write the new kubelet config down to disk and the env file if needed - if err := writeKubeletConfigFiles(client, cfg, newK8sVer, dryRun); err != nil { + if err := writeKubeletConfigFiles(client, cfg, dryRun); err != nil { errs = append(errs, err) } @@ -204,7 +203,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati }, 10) } -func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, newK8sVer *version.Version, dryRun bool) error { +func writeKubeletConfigFiles(client clientset.Interface, cfg 
*kubeadmapi.InitConfiguration, dryRun bool) error {
 	kubeletDir, err := GetKubeletDir(dryRun)
 	if err != nil {
 		// The error here should never occur in reality, would only be thrown if /tmp doesn't exist on the machine.
 		return err
 	}
 	errs := []error{}
 	// Write the configuration for the kubelet down to disk so the upgraded kubelet can start with fresh config
-	if err := kubeletphase.DownloadConfig(client, newK8sVer, kubeletDir); err != nil {
-		// Tolerate the error being NotFound when dryrunning, as there is a pretty common scenario: the dryrun process
-		// *would* post the new kubelet-config-1.X configmap that doesn't exist now when we're trying to download it
-		// again.
-		if !(apierrors.IsNotFound(err) && dryRun) {
-			errs = append(errs, errors.Wrap(err, "error downloading kubelet configuration from the ConfigMap"))
-		}
+	if err := kubeletphase.WriteConfigToDisk(&cfg.ClusterConfiguration, kubeletDir); err != nil {
+		errs = append(errs, errors.Wrap(err, "error writing kubelet configuration to file"))
 	}
 	if dryRun { // Print what contents would be written

From 5d6cf8ecd486464ba5c18d7ff389ebd8597c36c2 Mon Sep 17 00:00:00 2001
From: "Rostislav M. Georgiev"
Date: Wed, 4 Dec 2019 18:01:36 +0200
Subject: [PATCH 2/2] kubeadm: Distinguish between user supplied and generated component configs

Until now, users were always asked to manually convert a component config to
a version supported by kubeadm if kubeadm did not support its version. This
was true even for configs generated with older kubeadm versions, forcing
users to manually convert configs that kubeadm itself had generated. This is
neither appropriate nor user friendly, even though it tends to be the most
common case.

Hence, we sign kubeadm generated component configs stored in config maps with
a SHA256 checksum. If a config is loaded by kubeadm from a config map and has
a valid signature, it is considered "kubeadm generated"; if a version
migration is required, this config is automatically discarded and a new one
is generated. If there is no checksum, or the checksum does not match, the
config is considered "user supplied" and, if a version migration is required,
kubeadm will bail out with an error, requiring manual config migration (as it
does today).

The behavior when supplying component configs on the kubeadm command line
does not change. Kubeadm would still bail out with an error requiring
migration if it recognizes their groups but not their versions.

Signed-off-by: Rostislav M.
Georgiev --- cmd/kubeadm/app/apis/kubeadm/types.go | 6 + cmd/kubeadm/app/componentconfigs/BUILD | 3 + cmd/kubeadm/app/componentconfigs/checksums.go | 74 ++++++ .../app/componentconfigs/checksums_test.go | 224 ++++++++++++++++++ cmd/kubeadm/app/componentconfigs/configset.go | 78 ++++-- cmd/kubeadm/app/componentconfigs/kubelet.go | 12 +- .../app/componentconfigs/kubelet_test.go | 138 +++++++++-- cmd/kubeadm/app/componentconfigs/kubeproxy.go | 12 +- .../app/componentconfigs/kubeproxy_test.go | 130 ++++++++-- cmd/kubeadm/app/componentconfigs/utils.go | 20 ++ cmd/kubeadm/app/constants/constants.go | 3 + cmd/kubeadm/app/phases/addons/proxy/proxy.go | 94 ++++---- cmd/kubeadm/app/phases/kubelet/config.go | 12 +- 13 files changed, 698 insertions(+), 108 deletions(-) create mode 100644 cmd/kubeadm/app/componentconfigs/checksums.go create mode 100644 cmd/kubeadm/app/componentconfigs/checksums_test.go diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index e5c78a36d15..1759b765513 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -445,6 +445,12 @@ type ComponentConfig interface { // Default patches the component config with kubeadm preferred defaults Default(cfg *ClusterConfiguration, localAPIEndpoint *APIEndpoint, nodeRegOpts *NodeRegistrationOptions) + + // IsUserSupplied indicates if the component config was supplied or modified by a user or was kubeadm generated + IsUserSupplied() bool + + // SetUserSupplied sets the state of the component config "user supplied" flag to, either true, or false. + SetUserSupplied(userSupplied bool) } // ComponentConfigMap is a map between a group name (as in GVK group) and a ComponentConfig diff --git a/cmd/kubeadm/app/componentconfigs/BUILD b/cmd/kubeadm/app/componentconfigs/BUILD index 98d998241c5..f6e0016b6ee 100644 --- a/cmd/kubeadm/app/componentconfigs/BUILD +++ b/cmd/kubeadm/app/componentconfigs/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "checksums.go", "configset.go", "kubelet.go", "kubeproxy.go", @@ -19,6 +20,7 @@ go_library( "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/initsystem:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -40,6 +42,7 @@ go_library( go_test( name = "go_default_test", srcs = [ + "checksums_test.go", "configset_test.go", "kubelet_test.go", "kubeproxy_test.go", diff --git a/cmd/kubeadm/app/componentconfigs/checksums.go b/cmd/kubeadm/app/componentconfigs/checksums.go new file mode 100644 index 00000000000..94a360b55e5 --- /dev/null +++ b/cmd/kubeadm/app/componentconfigs/checksums.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentconfigs + +import ( + "crypto/sha256" + "fmt" + "sort" + + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +// ChecksumForConfigMap calculates a checksum for the supplied config map. The exact algorithm depends on hash and prefix parameters +func ChecksumForConfigMap(cm *v1.ConfigMap) string { + hash := sha256.New() + + // Since maps are not ordered we need to make sure we order them somehow so we'll always get the same checksums + // for the same config maps. The solution here is to extract the keys into a slice and sort them. + // Then iterate over that slice to fetch the values to be hashed. + keys := []string{} + for key := range cm.Data { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + hash.Write([]byte(cm.Data[key])) + } + + // Do the same as above, but for binaryData this time. + keys = []string{} + for key := range cm.BinaryData { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + hash.Write(cm.BinaryData[key]) + } + + return fmt.Sprintf("sha256:%x", hash.Sum(nil)) +} + +// SignConfigMap calculates the supplied config map checksum and annotates it with it +func SignConfigMap(cm *v1.ConfigMap) { + if cm.Annotations == nil { + cm.Annotations = map[string]string{} + } + cm.Annotations[constants.ComponentConfigHashAnnotationKey] = ChecksumForConfigMap(cm) +} + +// VerifyConfigMapSignature returns true if the config map has checksum annotation and it matches; false otherwise +func VerifyConfigMapSignature(cm *v1.ConfigMap) bool { + signature, ok := cm.Annotations[constants.ComponentConfigHashAnnotationKey] + if !ok { + return false + } + return signature == ChecksumForConfigMap(cm) +} diff --git a/cmd/kubeadm/app/componentconfigs/checksums_test.go b/cmd/kubeadm/app/componentconfigs/checksums_test.go new file mode 100644 index 00000000000..babd3020038 --- /dev/null +++ b/cmd/kubeadm/app/componentconfigs/checksums_test.go @@ -0,0 +1,224 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package componentconfigs + +import ( + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +var checksumTestCases = []struct { + desc string + configMap *v1.ConfigMap + checksum string +}{ + { + desc: "checksum is calculated using both data and binaryData", + checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238", + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "bar", + }, + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + }, + { + desc: "config keys have no effect on the checksum", + checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238", + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo2": "bar", + }, + BinaryData: map[string][]byte{ + "bar2": []byte("baz"), + }, + }, + }, + { + desc: "metadata fields have no effect on the checksum", + checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238", + configMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-config", + Namespace: "le-namespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Annotations: map[string]string{ + "annotation1": "value1", + "annotation2": "value2", + }, + }, + Data: map[string]string{ + "foo": "bar", + }, + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + }, + { + desc: "checksum can be calculated without binaryData", + checksum: "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "bar", + }, + }, + }, + { + desc: "checksum can be calculated without data", + checksum: "sha256:baa5a0964d3320fbc0c6a922140453c8513ea24ab8fd0577034804a967248096", + configMap: &v1.ConfigMap{ + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + }, +} + +func TestChecksumForConfigMap(t *testing.T) { + for _, test := range checksumTestCases { + t.Run(test.desc, func(t *testing.T) { + got := ChecksumForConfigMap(test.configMap) + if got != test.checksum { + t.Errorf("checksum mismatch - got %q, expected %q", got, test.checksum) + } + }) + } +} + +func TestSignConfigMap(t *testing.T) { + for _, test := range checksumTestCases { + t.Run(test.desc, func(t *testing.T) { + target := test.configMap.DeepCopy() + SignConfigMap(target) + + // Verify that we have a correct annotation + signature, ok := target.Annotations[constants.ComponentConfigHashAnnotationKey] + if !ok { + t.Errorf("no %s annotation found in the config map", constants.ComponentConfigHashAnnotationKey) + } else { + if signature != test.checksum { + t.Errorf("unexpected checksum - got %q, expected %q", signature, test.checksum) + } + } + + // Verify that we have added an annotation (and not overwritten them) + expectedAnnotationCount := 1 + len(test.configMap.Annotations) + if len(target.Annotations) != expectedAnnotationCount { + t.Errorf("unexpected number of annotations - got %d, expected %d", len(target.Annotations), expectedAnnotationCount) + } + }) + } +} + +func TestVerifyConfigMapSignature(t *testing.T) { + tests := []struct { + desc string + configMap *v1.ConfigMap + expectErr bool + }{ + { + desc: "correct signature is acknowledged", + configMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-config", + Namespace: "le-namespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Annotations: map[string]string{ + "annotation1": "value1", + 
"annotation2": "value2", + constants.ComponentConfigHashAnnotationKey: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238", + }, + }, + Data: map[string]string{ + "foo": "bar", + }, + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + }, + { + desc: "wrong checksum leads to failure", + configMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-config", + Namespace: "le-namespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Annotations: map[string]string{ + "annotation1": "value1", + "annotation2": "value2", + constants.ComponentConfigHashAnnotationKey: "sha256:832cb34fc68fc370dd44dd91d5699c118ec49e46e5e6014866d6a827427b8f8c", + }, + }, + Data: map[string]string{ + "foo": "bar", + }, + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + expectErr: true, + }, + { + desc: "missing signature means error", + configMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "le-config", + Namespace: "le-namespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Annotations: map[string]string{ + "annotation1": "value1", + "annotation2": "value2", + }, + }, + Data: map[string]string{ + "foo": "bar", + }, + BinaryData: map[string][]byte{ + "bar": []byte("baz"), + }, + }, + expectErr: true, + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + result := VerifyConfigMapSignature(test.configMap) + if result != !test.expectErr { + t.Errorf("unexpected result - got %t, expected %t", result, !test.expectErr) + } + }) + } +} diff --git a/cmd/kubeadm/app/componentconfigs/configset.go b/cmd/kubeadm/app/componentconfigs/configset.go index f65fa15b383..c86c8cde145 100644 --- a/cmd/kubeadm/app/componentconfigs/configset.go +++ b/cmd/kubeadm/app/componentconfigs/configset.go @@ -61,6 +61,8 @@ func (h *handler) FromDocumentMap(docmap kubeadmapi.DocumentMap) (kubeadmapi.Com if err := cfg.Unmarshal(docmap); err != nil { return nil, err } + // consider all successfully loaded configs from a document map as user supplied + cfg.SetUserSupplied(true) return cfg, nil } } @@ -89,7 +91,24 @@ func (h *handler) fromConfigMap(client clientset.Interface, cmName, cmKey string return nil, err } - return h.FromDocumentMap(gvkmap) + // If the checksum comes up neatly we assume the config was generated + generatedConfig := VerifyConfigMapSignature(configMap) + + componentCfg, err := h.FromDocumentMap(gvkmap) + if err != nil { + // If the config was generated and we get UnsupportedConfigVersionError, we skip loading it. + // This will force us to use the generated default current version (effectively regenerating the config with the current version). + if _, ok := err.(*UnsupportedConfigVersionError); ok && generatedConfig { + return nil, nil + } + return nil, err + } + + if componentCfg != nil { + componentCfg.SetUserSupplied(!generatedConfig) + } + + return componentCfg, nil } // FromCluster loads a component from a config map in the cluster @@ -97,25 +116,60 @@ func (h *handler) FromCluster(clientset clientset.Interface, clusterCfg *kubeadm return h.fromCluster(h, clientset, clusterCfg) } +// known holds the known component config handlers. Add new component configs here. 
+var known = []*handler{ + &kubeProxyHandler, + &kubeletHandler, +} + +// configBase is the base type for all component config implementations +type configBase struct { + // GroupVersion holds the supported GroupVersion for the inheriting config + GroupVersion schema.GroupVersion + + // userSupplied tells us if the config is user supplied (invalid checksum) or not + userSupplied bool +} + +func (cb *configBase) IsUserSupplied() bool { + return cb.userSupplied +} + +func (cb *configBase) SetUserSupplied(userSupplied bool) { + cb.userSupplied = userSupplied +} + +func (cb *configBase) DeepCopyInto(other *configBase) { + *other = *cb +} + +func cloneBytes(in []byte) []byte { + out := make([]byte, len(in)) + copy(out, in) + return out +} + // Marshal is an utility function, used by the component config support implementations to marshal a runtime.Object to YAML with the // correct group and version -func (h *handler) Marshal(object runtime.Object) ([]byte, error) { - return kubeadmutil.MarshalToYamlForCodecs(object, h.GroupVersion, Codecs) +func (cb *configBase) Marshal(object runtime.Object) ([]byte, error) { + return kubeadmutil.MarshalToYamlForCodecs(object, cb.GroupVersion, Codecs) } // Unmarshal attempts to unmarshal a runtime.Object from a document map. If no object is found, no error is returned. // If a matching group is found, but no matching version an error is returned indicating that users should do manual conversion. -func (h *handler) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) error { +func (cb *configBase) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) error { for gvk, yaml := range from { // If this is a different group, we ignore it - if gvk.Group != h.GroupVersion.Group { + if gvk.Group != cb.GroupVersion.Group { continue } - // If this is the correct group, but different version, we return an error - if gvk.Version != h.GroupVersion.Version { - // TODO: Replace this with a special error type and make UX better around it - return errors.Errorf("unexpected apiVersion %q, you may have to do manual conversion to %q and execute kubeadm again", gvk.GroupVersion(), h.GroupVersion) + if gvk.Version != cb.GroupVersion.Version { + return &UnsupportedConfigVersionError{ + OldVersion: gvk.GroupVersion(), + CurrentVersion: cb.GroupVersion, + Document: cloneBytes(yaml), + } } // As long as we support only component configs with a single kind, this is allowed @@ -125,12 +179,6 @@ func (h *handler) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) er return nil } -// known holds the known component config handlers. Add new component configs here. 
-var known = []*handler{ - &kubeProxyHandler, - &kubeletHandler, -} - // ensureInitializedComponentConfigs is an utility func to initialize the ComponentConfigMap in ClusterConfiguration prior to possible writes to it func ensureInitializedComponentConfigs(clusterCfg *kubeadmapi.ClusterConfiguration) { if clusterCfg.ComponentConfigs == nil { diff --git a/cmd/kubeadm/app/componentconfigs/kubelet.go b/cmd/kubeadm/app/componentconfigs/kubelet.go index add436ef46e..ae13e8822e8 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet.go @@ -63,7 +63,11 @@ var kubeletHandler = handler{ GroupVersion: kubeletconfig.SchemeGroupVersion, AddToScheme: kubeletconfig.AddToScheme, CreateEmpty: func() kubeadmapi.ComponentConfig { - return &kubeletConfig{} + return &kubeletConfig{ + configBase: configBase{ + GroupVersion: kubeletconfig.SchemeGroupVersion, + }, + } }, fromCluster: kubeletConfigFromCluster, } @@ -81,21 +85,23 @@ func kubeletConfigFromCluster(h *handler, clientset clientset.Interface, cluster // kubeletConfig implements the kubeadmapi.ComponentConfig interface for kubelet type kubeletConfig struct { + configBase config kubeletconfig.KubeletConfiguration } func (kc *kubeletConfig) DeepCopy() kubeadmapi.ComponentConfig { result := &kubeletConfig{} + kc.configBase.DeepCopyInto(&result.configBase) kc.config.DeepCopyInto(&result.config) return result } func (kc *kubeletConfig) Marshal() ([]byte, error) { - return kubeletHandler.Marshal(&kc.config) + return kc.configBase.Marshal(&kc.config) } func (kc *kubeletConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error { - return kubeletHandler.Unmarshal(docmap, &kc.config) + return kc.configBase.Unmarshal(docmap, &kc.config) } func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubeadmapi.APIEndpoint, nodeRegOpts *kubeadmapi.NodeRegistrationOptions) { diff --git a/cmd/kubeadm/app/componentconfigs/kubelet_test.go b/cmd/kubeadm/app/componentconfigs/kubelet_test.go index 85a1ce8428e..d6f86c721a1 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet_test.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package componentconfigs import ( + "crypto/sha256" + "fmt" "path/filepath" "reflect" "strings" @@ -47,7 +49,15 @@ var kubeletMarshalCases = []struct { { name: "Empty config", obj: &kubeletConfig{ - config: kubeletconfig.KubeletConfiguration{}, + configBase: configBase{ + GroupVersion: kubeletconfig.SchemeGroupVersion, + }, + config: kubeletconfig.KubeletConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: kubeletconfig.SchemeGroupVersion.String(), + Kind: "KubeletConfiguration", + }, + }, }, yaml: dedent.Dedent(` apiVersion: kubelet.config.k8s.io/v1beta1 @@ -77,7 +87,14 @@ var kubeletMarshalCases = []struct { { name: "Non empty config", obj: &kubeletConfig{ + configBase: configBase{ + GroupVersion: kubeletconfig.SchemeGroupVersion, + }, config: kubeletconfig.KubeletConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: kubeletconfig.SchemeGroupVersion.String(), + Kind: "KubeletConfiguration", + }, Address: "1.2.3.4", Port: 12345, RotateCertificates: true, @@ -138,17 +155,17 @@ func TestKubeletUnmarshal(t *testing.T) { t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err) } - got := &kubeletConfig{} + got := &kubeletConfig{ + configBase: configBase{ + GroupVersion: kubeletconfig.SchemeGroupVersion, + }, + } if err = got.Unmarshal(gvkmap); err != nil { t.Fatalf("unexpected failure of Unmarshal: %v", err) } - expected := test.obj.DeepCopy().(*kubeletConfig) - expected.config.APIVersion = kubeletHandler.GroupVersion.String() - expected.config.Kind = "KubeletConfiguration" - - if !reflect.DeepEqual(got, expected) { - t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got) + if !reflect.DeepEqual(got, test.obj) { + t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.obj, got) } }) } @@ -373,10 +390,19 @@ func TestKubeletDefault(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got := &kubeletConfig{} + // This is the same for all test cases so we set it here + expected := test.expected + expected.configBase.GroupVersion = kubeletconfig.SchemeGroupVersion + + got := &kubeletConfig{ + configBase: configBase{ + GroupVersion: kubeletconfig.SchemeGroupVersion, + }, + } got.Default(&test.clusterCfg, &kubeadmapi.APIEndpoint{}, &kubeadmapi.NodeRegistrationOptions{}) - if !reflect.DeepEqual(got, &test.expected) { - t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.expected, *got) + + if !reflect.DeepEqual(got, &expected) { + t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, *got) } }) } @@ -408,14 +434,6 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub `), expectErr: true, }, - { - name: "New kubelet version returns an error", - in: dedent.Dedent(` - apiVersion: kubelet.config.k8s.io/v1 - kind: KubeletConfiguration - `), - expectErr: true, - }, { name: "Wrong kubelet kind returns an error", in: dedent.Dedent(` @@ -434,6 +452,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub rotateCertificates: true `), out: &kubeletConfig{ + configBase: configBase{ + GroupVersion: kubeletHandler.GroupVersion, + userSupplied: true, + }, config: kubeletconfig.KubeletConfiguration{ TypeMeta: metav1.TypeMeta{ APIVersion: kubeletHandler.GroupVersion.String(), @@ -458,6 +480,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub rotateCertificates: true `), out: &kubeletConfig{ + configBase: configBase{ + GroupVersion: 
kubeletHandler.GroupVersion, + userSupplied: true, + }, config: kubeletconfig.KubeletConfiguration{ TypeMeta: metav1.TypeMeta{ APIVersion: kubeletHandler.GroupVersion.String(), @@ -492,8 +518,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub } else { if test.out == nil { t.Errorf("unexpected result: %v", got) - } else if !reflect.DeepEqual(test.out, got) { - t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got) + } else { + if !reflect.DeepEqual(test.out, got) { + t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got) + } } } } @@ -537,3 +565,71 @@ func TestKubeletFromCluster(t *testing.T) { return kubeletHandler.FromCluster(client, clusterCfg) }) } + +func TestGeneratedKubeletFromCluster(t *testing.T) { + testYAML := dedent.Dedent(` + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + address: 1.2.3.4 + port: 12345 + rotateCertificates: true + `) + testYAMLHash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(testYAML))) + // The SHA256 sum of "The quick brown fox jumps over the lazy dog" + const mismatchHash = "sha256:d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592" + tests := []struct { + name string + hash string + userSupplied bool + }{ + { + name: "Matching hash means generated config", + hash: testYAMLHash, + }, + { + name: "Missmatching hash means user supplied config", + hash: mismatchHash, + userSupplied: true, + }, + { + name: "No hash means user supplied config", + userSupplied: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + clusterCfg := &kubeadmapi.ClusterConfiguration{ + KubernetesVersion: constants.CurrentKubernetesVersion.String(), + } + + k8sVersion := version.MustParseGeneric(clusterCfg.KubernetesVersion) + + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.GetKubeletConfigMapName(k8sVersion), + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + constants.KubeletBaseConfigurationConfigMapKey: testYAML, + }, + } + + if test.hash != "" { + configMap.Annotations = map[string]string{ + constants.ComponentConfigHashAnnotationKey: test.hash, + } + } + + client := clientsetfake.NewSimpleClientset(configMap) + cfg, err := kubeletHandler.FromCluster(client, clusterCfg) + if err != nil { + t.Fatalf("unexpected failure of FromCluster: %v", err) + } + + got := cfg.IsUserSupplied() + if got != test.userSupplied { + t.Fatalf("mismatch between expected and got:\n\tExpected: %t\n\tGot: %t", test.userSupplied, got) + } + }) + } +} diff --git a/cmd/kubeadm/app/componentconfigs/kubeproxy.go b/cmd/kubeadm/app/componentconfigs/kubeproxy.go index 1ab3527c3ff..c53beccdce5 100644 --- a/cmd/kubeadm/app/componentconfigs/kubeproxy.go +++ b/cmd/kubeadm/app/componentconfigs/kubeproxy.go @@ -41,7 +41,11 @@ var kubeProxyHandler = handler{ GroupVersion: kubeproxyconfig.SchemeGroupVersion, AddToScheme: kubeproxyconfig.AddToScheme, CreateEmpty: func() kubeadmapi.ComponentConfig { - return &kubeProxyConfig{} + return &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeproxyconfig.SchemeGroupVersion, + }, + } }, fromCluster: kubeProxyConfigFromCluster, } @@ -52,21 +56,23 @@ func kubeProxyConfigFromCluster(h *handler, clientset clientset.Interface, _ *ku // kubeProxyConfig implements the kubeadmapi.ComponentConfig interface for kube-proxy type kubeProxyConfig struct { + configBase config kubeproxyconfig.KubeProxyConfiguration } func (kp *kubeProxyConfig) 
DeepCopy() kubeadmapi.ComponentConfig { result := &kubeProxyConfig{} + kp.configBase.DeepCopyInto(&result.configBase) kp.config.DeepCopyInto(&result.config) return result } func (kp *kubeProxyConfig) Marshal() ([]byte, error) { - return kubeProxyHandler.Marshal(&kp.config) + return kp.configBase.Marshal(&kp.config) } func (kp *kubeProxyConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error { - return kubeProxyHandler.Unmarshal(docmap, &kp.config) + return kp.configBase.Unmarshal(docmap, &kp.config) } func kubeProxyDefaultBindAddress(localAdvertiseAddress string) string { diff --git a/cmd/kubeadm/app/componentconfigs/kubeproxy_test.go b/cmd/kubeadm/app/componentconfigs/kubeproxy_test.go index eef1513d5be..3cd17d9d50c 100644 --- a/cmd/kubeadm/app/componentconfigs/kubeproxy_test.go +++ b/cmd/kubeadm/app/componentconfigs/kubeproxy_test.go @@ -17,6 +17,8 @@ limitations under the License. package componentconfigs import ( + "crypto/sha256" + "fmt" "reflect" "strings" "testing" @@ -45,7 +47,15 @@ var kubeProxyMarshalCases = []struct { { name: "Empty config", obj: &kubeProxyConfig{ - config: kubeproxyconfig.KubeProxyConfiguration{}, + configBase: configBase{ + GroupVersion: kubeproxyconfig.SchemeGroupVersion, + }, + config: kubeproxyconfig.KubeProxyConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: kubeproxyconfig.SchemeGroupVersion.String(), + Kind: "KubeProxyConfiguration", + }, + }, }, yaml: dedent.Dedent(` apiVersion: kubeproxy.config.k8s.io/v1alpha1 @@ -99,7 +109,14 @@ var kubeProxyMarshalCases = []struct { { name: "Non empty config", obj: &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeproxyconfig.SchemeGroupVersion, + }, config: kubeproxyconfig.KubeProxyConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: kubeproxyconfig.SchemeGroupVersion.String(), + Kind: "KubeProxyConfiguration", + }, BindAddress: "1.2.3.4", EnableProfiling: true, }, @@ -180,17 +197,17 @@ func TestKubeProxyUnmarshal(t *testing.T) { t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err) } - got := &kubeProxyConfig{} + got := &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeproxyconfig.SchemeGroupVersion, + }, + } if err = got.Unmarshal(gvkmap); err != nil { t.Fatalf("unexpected failure of Unmarshal: %v", err) } - expected := test.obj.DeepCopy().(*kubeProxyConfig) - expected.config.APIVersion = kubeProxyHandler.GroupVersion.String() - expected.config.Kind = "KubeProxyConfiguration" - - if !reflect.DeepEqual(got, expected) { - t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got) + if !reflect.DeepEqual(got, test.obj) { + t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.obj, got) } }) } @@ -296,10 +313,18 @@ func TestKubeProxyDefault(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got := &kubeProxyConfig{} + // This is the same for all test cases so we set it here + expected := test.expected + expected.configBase.GroupVersion = kubeproxyconfig.SchemeGroupVersion + + got := &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeproxyconfig.SchemeGroupVersion, + }, + } got.Default(&test.clusterCfg, &test.endpoint, &kubeadmapi.NodeRegistrationOptions{}) - if !reflect.DeepEqual(got, &test.expected) { - t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.expected, got) + if !reflect.DeepEqual(got, &expected) { + t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got) } }) } @@ -331,14 +356,6 @@ 
func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k `), expectErr: true, }, - { - name: "New kube-proxy version returns an error", - in: dedent.Dedent(` - apiVersion: kubeproxy.config.k8s.io/v1beta1 - kind: KubeProxyConfiguration - `), - expectErr: true, - }, { name: "Wrong kube-proxy kind returns an error", in: dedent.Dedent(` @@ -356,6 +373,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k enableProfiling: true `), out: &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeProxyHandler.GroupVersion, + userSupplied: true, + }, config: kubeproxyconfig.KubeProxyConfiguration{ TypeMeta: metav1.TypeMeta{ APIVersion: kubeProxyHandler.GroupVersion.String(), @@ -378,6 +399,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k enableProfiling: true `), out: &kubeProxyConfig{ + configBase: configBase{ + GroupVersion: kubeProxyHandler.GroupVersion, + userSupplied: true, + }, config: kubeproxyconfig.KubeProxyConfiguration{ TypeMeta: metav1.TypeMeta{ APIVersion: kubeProxyHandler.GroupVersion.String(), @@ -411,8 +436,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k } else { if test.out == nil { t.Errorf("unexpected result: %v", got) - } else if !reflect.DeepEqual(test.out, got) { - t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got) + } else { + if !reflect.DeepEqual(test.out, got) { + t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got) + } } } } @@ -450,3 +477,64 @@ func TestKubeProxyFromCluster(t *testing.T) { return kubeProxyHandler.FromCluster(client, &kubeadmapi.ClusterConfiguration{}) }) } + +func TestGeneratedKubeProxyFromCluster(t *testing.T) { + testYAML := dedent.Dedent(` + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + bindAddress: 1.2.3.4 + enableProfiling: true + `) + testYAMLHash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(testYAML))) + // The SHA256 sum of "The quick brown fox jumps over the lazy dog" + const mismatchHash = "sha256:d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592" + tests := []struct { + name string + hash string + userSupplied bool + }{ + { + name: "Matching hash means generated config", + hash: testYAMLHash, + }, + { + name: "Missmatching hash means user supplied config", + hash: mismatchHash, + userSupplied: true, + }, + { + name: "No hash means user supplied config", + userSupplied: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.KubeProxyConfigMap, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + constants.KubeProxyConfigMapKey: testYAML, + }, + } + + if test.hash != "" { + configMap.Annotations = map[string]string{ + constants.ComponentConfigHashAnnotationKey: test.hash, + } + } + + client := clientsetfake.NewSimpleClientset(configMap) + cfg, err := kubeProxyHandler.FromCluster(client, &kubeadmapi.ClusterConfiguration{}) + if err != nil { + t.Fatalf("unexpected failure of FromCluster: %v", err) + } + + got := cfg.IsUserSupplied() + if got != test.userSupplied { + t.Fatalf("mismatch between expected and got:\n\tExpected: %t\n\tGot: %t", test.userSupplied, got) + } + }) + } +} diff --git a/cmd/kubeadm/app/componentconfigs/utils.go b/cmd/kubeadm/app/componentconfigs/utils.go index d23db632f1b..ad7d53035ca 100644 --- 
a/cmd/kubeadm/app/componentconfigs/utils.go +++ b/cmd/kubeadm/app/componentconfigs/utils.go @@ -17,9 +17,29 @@ limitations under the License. package componentconfigs import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" ) +// UnsupportedConfigVersionError is a special error type returned whenever we encounter too old config version +type UnsupportedConfigVersionError struct { + // OldVersion is the config version that is causing the problem + OldVersion schema.GroupVersion + + // CurrentVersion describes the natively supported config version + CurrentVersion schema.GroupVersion + + // Document points to the YAML/JSON document that caused the problem + Document []byte +} + +// Error implements the standard Golang error interface for UnsupportedConfigVersionError +func (err *UnsupportedConfigVersionError) Error() string { + return fmt.Sprintf("unsupported apiVersion %q, you may have to do manual conversion to %q and run kubeadm again", err.OldVersion, err.CurrentVersion) +} + // warnDefaultComponentConfigValue prints a warning if the user modified a field in a certain // CompomentConfig from the default recommended value in kubeadm. func warnDefaultComponentConfigValue(componentConfigKind, paramName string, defaultValue, userValue interface{}) { diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index a3372a4e605..36d59cf6e60 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -381,6 +381,9 @@ const ( // KubeAPIServerAdvertiseAddressEndpointAnnotationKey is the annotation key on every apiserver pod, // describing the API endpoint (advertise address and bind port of the api server) KubeAPIServerAdvertiseAddressEndpointAnnotationKey = "kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint" + // ComponentConfigHashAnnotationKey holds the config map annotation key that kubeadm uses to store + // a SHA256 sum to check for user changes + ComponentConfigHashAnnotationKey = "kubeadm.kubernetes.io/component-config.hash" // ControlPlaneTier is the value used in the tier label to identify control plane components ControlPlaneTier = "control-plane" diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index 17be0704ed4..024e43e0fc5 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -51,50 +51,14 @@ func EnsureProxyAddon(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubea return errors.Wrap(err, "error when creating kube-proxy service account") } - // Generate ControlPlane Enpoint kubeconfig file - controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, localEndpoint) - if err != nil { + if err := createKubeProxyConfigMap(cfg, localEndpoint, client); err != nil { return err } - kubeProxyCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeProxyGroup] - if !ok { - return errors.New("no kube-proxy component config found in the active component config set") - } - - proxyBytes, err := kubeProxyCfg.Marshal() - if err != nil { - return errors.Wrap(err, "error when marshaling") - } - var prefixBytes bytes.Buffer - apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ") - var proxyConfigMapBytes, proxyDaemonSetBytes []byte - proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19, - struct { - ControlPlaneEndpoint string - ProxyConfig string - ProxyConfigMap string - ProxyConfigMapKey string - }{ - 
ControlPlaneEndpoint: controlPlaneEndpoint, - ProxyConfig: prefixBytes.String(), - ProxyConfigMap: constants.KubeProxyConfigMap, - ProxyConfigMapKey: constants.KubeProxyConfigMapKey, - }) - if err != nil { - return errors.Wrap(err, "error when parsing kube-proxy configmap template") - } - proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ Image, ProxyConfigMap, ProxyConfigMapKey string }{ - Image: images.GetKubernetesImage(constants.KubeProxy, cfg), - ProxyConfigMap: constants.KubeProxyConfigMap, - ProxyConfigMapKey: constants.KubeProxyConfigMapKey, - }) - if err != nil { - return errors.Wrap(err, "error when parsing kube-proxy daemonset template") - } - if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil { + if err := createKubeProxyAddon(cfg, client); err != nil { return err } + if err := CreateRBACRules(client); err != nil { return errors.Wrap(err, "error when creating kube-proxy RBAC rules") } @@ -119,15 +83,61 @@ func CreateRBACRules(client clientset.Interface) error { return createClusterRoleBindings(client) } -func createKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientset.Interface) error { +func createKubeProxyConfigMap(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubeadmapi.APIEndpoint, client clientset.Interface) error { + // Generate ControlPlane Enpoint kubeconfig file + controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, localEndpoint) + if err != nil { + return err + } + + kubeProxyCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeProxyGroup] + if !ok { + return errors.New("no kube-proxy component config found in the active component config set") + } + + proxyBytes, err := kubeProxyCfg.Marshal() + if err != nil { + return errors.Wrap(err, "error when marshaling") + } + var prefixBytes bytes.Buffer + apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ") + configMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap19, + struct { + ControlPlaneEndpoint string + ProxyConfig string + ProxyConfigMap string + ProxyConfigMapKey string + }{ + ControlPlaneEndpoint: controlPlaneEndpoint, + ProxyConfig: prefixBytes.String(), + ProxyConfigMap: constants.KubeProxyConfigMap, + ProxyConfigMapKey: constants.KubeProxyConfigMapKey, + }) + if err != nil { + return errors.Wrap(err, "error when parsing kube-proxy configmap template") + } + kubeproxyConfigMap := &v1.ConfigMap{} if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil { return errors.Wrap(err, "unable to decode kube-proxy configmap") } + if !kubeProxyCfg.IsUserSupplied() { + componentconfigs.SignConfigMap(kubeproxyConfigMap) + } + // Create the ConfigMap for kube-proxy or update it in case it already exists - if err := apiclient.CreateOrUpdateConfigMap(client, kubeproxyConfigMap); err != nil { - return err + return apiclient.CreateOrUpdateConfigMap(client, kubeproxyConfigMap) +} + +func createKubeProxyAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error { + daemonSetbytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ Image, ProxyConfigMap, ProxyConfigMapKey string }{ + Image: images.GetKubernetesImage(constants.KubeProxy, cfg), + ProxyConfigMap: constants.KubeProxyConfigMap, + ProxyConfigMapKey: constants.KubeProxyConfigMapKey, + }) + if err != nil { + return errors.Wrap(err, "error when parsing kube-proxy daemonset template") } kubeproxyDaemonSet := 
&apps.DaemonSet{} diff --git a/cmd/kubeadm/app/phases/kubelet/config.go b/cmd/kubeadm/app/phases/kubelet/config.go index 1aeb39b6ebd..a0a07612901 100644 --- a/cmd/kubeadm/app/phases/kubelet/config.go +++ b/cmd/kubeadm/app/phases/kubelet/config.go @@ -40,7 +40,7 @@ import ( func WriteConfigToDisk(cfg *kubeadmapi.ClusterConfiguration, kubeletDir string) error { kubeletCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeletGroup] if !ok { - return errors.New("no kubelet component config found in the active component config set") + return errors.New("no kubelet component config found") } kubeletBytes, err := kubeletCfg.Marshal() @@ -73,7 +73,7 @@ func CreateConfigMap(cfg *kubeadmapi.ClusterConfiguration, client clientset.Inte return err } - if err := apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{ + configMap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: configMapName, Namespace: metav1.NamespaceSystem, @@ -81,7 +81,13 @@ func CreateConfigMap(cfg *kubeadmapi.ClusterConfiguration, client clientset.Inte Data: map[string]string{ kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes), }, - }); err != nil { + } + + if !kubeletCfg.IsUserSupplied() { + componentconfigs.SignConfigMap(configMap) + } + + if err := apiclient.CreateOrUpdateConfigMap(client, configMap); err != nil { return err }