kubeadm: Distinguish between user supplied and generated component configs
Until now, users were always asked to manually convert a component config to a version supported by kubeadm whenever kubeadm did not support its version. This was true even for configs generated by older versions of kubeadm, forcing users to manually migrate configs that kubeadm itself had produced. That is neither appropriate nor user friendly, even though it tends to be the most common case.

Hence, kubeadm generated component configs stored in config maps are now signed with a SHA256 checksum. If a config loaded by kubeadm from a config map carries a valid signature, it is considered "kubeadm generated"; when a version migration is required, such a config is automatically discarded and a new one is generated. If there is no checksum, or the checksum does not match, the config is considered "user supplied"; when a version migration is required, kubeadm bails out with an error that asks for manual config migration (as it does today).

The behavior when supplying component configs on the kubeadm command line does not change: kubeadm still bails out with an error requiring migration if it recognizes their groups but not their versions.

Signed-off-by: Rostislav M. Georgiev <rostislavg@vmware.com>
commit 5d6cf8ecd4 (parent 543f29be4e)
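As a quick orientation before the diff: below is a minimal sketch (not part of the commit) of how the three helpers this change introduces — ChecksumForConfigMap, SignConfigMap and VerifyConfigMapSignature — are meant to interact. The standalone main wrapper and the sample config map contents are illustrative only; the annotation key is the constant added to the kubeadm constants in this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
)

func main() {
	// A config map roughly as kubeadm would upload it (contents are illustrative).
	cm := &v1.ConfigMap{
		Data: map[string]string{
			"config.conf": "apiVersion: kubeproxy.config.k8s.io/v1alpha1\nkind: KubeProxyConfiguration\n",
		},
	}

	// On upload: annotate the config map with a SHA256 checksum of its payload.
	componentconfigs.SignConfigMap(cm)
	fmt.Println(cm.Annotations["kubeadm.kubernetes.io/component-config.hash"])

	// On load: a matching checksum means "kubeadm generated", so the config may be
	// safely regenerated if its version is no longer supported.
	fmt.Println(componentconfigs.VerifyConfigMapSignature(cm)) // true

	// Any user edit invalidates the signature, so the config is treated as "user supplied".
	cm.Data["config.conf"] += "mode: ipvs\n"
	fmt.Println(componentconfigs.VerifyConfigMapSignature(cm)) // false
}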
@@ -445,6 +445,12 @@ type ComponentConfig interface {
	// Default patches the component config with kubeadm preferred defaults
	Default(cfg *ClusterConfiguration, localAPIEndpoint *APIEndpoint, nodeRegOpts *NodeRegistrationOptions)

	// IsUserSupplied indicates if the component config was supplied or modified by a user or was kubeadm generated
	IsUserSupplied() bool

	// SetUserSupplied sets the state of the component config "user supplied" flag to, either true, or false.
	SetUserSupplied(userSupplied bool)
}

// ComponentConfigMap is a map between a group name (as in GVK group) and a ComponentConfig
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "checksums.go",
        "configset.go",
        "kubelet.go",
        "kubeproxy.go",
@@ -19,6 +20,7 @@ go_library(
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/app/util/initsystem:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -40,6 +42,7 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = [
        "checksums_test.go",
        "configset_test.go",
        "kubelet_test.go",
        "kubeproxy_test.go",
cmd/kubeadm/app/componentconfigs/checksums.go (new file, 74 lines)
@@ -0,0 +1,74 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package componentconfigs

import (
	"crypto/sha256"
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// ChecksumForConfigMap calculates a checksum for the supplied config map. The exact algorithm depends on hash and prefix parameters
func ChecksumForConfigMap(cm *v1.ConfigMap) string {
	hash := sha256.New()

	// Since maps are not ordered we need to make sure we order them somehow so we'll always get the same checksums
	// for the same config maps. The solution here is to extract the keys into a slice and sort them.
	// Then iterate over that slice to fetch the values to be hashed.
	keys := []string{}
	for key := range cm.Data {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	for _, key := range keys {
		hash.Write([]byte(cm.Data[key]))
	}

	// Do the same as above, but for binaryData this time.
	keys = []string{}
	for key := range cm.BinaryData {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	for _, key := range keys {
		hash.Write(cm.BinaryData[key])
	}

	return fmt.Sprintf("sha256:%x", hash.Sum(nil))
}

// SignConfigMap calculates the supplied config map checksum and annotates it with it
func SignConfigMap(cm *v1.ConfigMap) {
	if cm.Annotations == nil {
		cm.Annotations = map[string]string{}
	}
	cm.Annotations[constants.ComponentConfigHashAnnotationKey] = ChecksumForConfigMap(cm)
}

// VerifyConfigMapSignature returns true if the config map has checksum annotation and it matches; false otherwise
func VerifyConfigMapSignature(cm *v1.ConfigMap) bool {
	signature, ok := cm.Annotations[constants.ComponentConfigHashAnnotationKey]
	if !ok {
		return false
	}
	return signature == ChecksumForConfigMap(cm)
}
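A design choice worth noting, and the property the test cases below assert: only the values in Data and BinaryData are hashed, in key-sorted order, so config map key names and object metadata do not influence the checksum. A tiny illustrative test-style sketch (hypothetical, not part of the commit):

package componentconfigs

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

// Illustrative only: the checksum hashes the values of Data/BinaryData in key-sorted
// order, so renaming a key (or adding labels/annotations) leaves the checksum unchanged.
func TestChecksumIgnoresKeys(t *testing.T) {
	a := &v1.ConfigMap{Data: map[string]string{"config.conf": "foo"}}
	b := &v1.ConfigMap{Data: map[string]string{"renamed.conf": "foo"}}
	if ChecksumForConfigMap(a) != ChecksumForConfigMap(b) {
		t.Fatal("expected identical checksums regardless of key names")
	}
}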
cmd/kubeadm/app/componentconfigs/checksums_test.go (new file, 224 lines)
@@ -0,0 +1,224 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package componentconfigs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
)
|
||||
|
||||
var checksumTestCases = []struct {
|
||||
desc string
|
||||
configMap *v1.ConfigMap
|
||||
checksum string
|
||||
}{
|
||||
{
|
||||
desc: "checksum is calculated using both data and binaryData",
|
||||
checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238",
|
||||
configMap: &v1.ConfigMap{
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "config keys have no effect on the checksum",
|
||||
checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238",
|
||||
configMap: &v1.ConfigMap{
|
||||
Data: map[string]string{
|
||||
"foo2": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar2": []byte("baz"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "metadata fields have no effect on the checksum",
|
||||
checksum: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "le-config",
|
||||
Namespace: "le-namespace",
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
"label2": "value2",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"annotation1": "value1",
|
||||
"annotation2": "value2",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "checksum can be calculated without binaryData",
|
||||
checksum: "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
|
||||
configMap: &v1.ConfigMap{
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "checksum can be calculated without data",
|
||||
checksum: "sha256:baa5a0964d3320fbc0c6a922140453c8513ea24ab8fd0577034804a967248096",
|
||||
configMap: &v1.ConfigMap{
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestChecksumForConfigMap(t *testing.T) {
|
||||
for _, test := range checksumTestCases {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
got := ChecksumForConfigMap(test.configMap)
|
||||
if got != test.checksum {
|
||||
t.Errorf("checksum mismatch - got %q, expected %q", got, test.checksum)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignConfigMap(t *testing.T) {
|
||||
for _, test := range checksumTestCases {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
target := test.configMap.DeepCopy()
|
||||
SignConfigMap(target)
|
||||
|
||||
// Verify that we have a correct annotation
|
||||
signature, ok := target.Annotations[constants.ComponentConfigHashAnnotationKey]
|
||||
if !ok {
|
||||
t.Errorf("no %s annotation found in the config map", constants.ComponentConfigHashAnnotationKey)
|
||||
} else {
|
||||
if signature != test.checksum {
|
||||
t.Errorf("unexpected checksum - got %q, expected %q", signature, test.checksum)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that we have added an annotation (and not overwritten them)
|
||||
expectedAnnotationCount := 1 + len(test.configMap.Annotations)
|
||||
if len(target.Annotations) != expectedAnnotationCount {
|
||||
t.Errorf("unexpected number of annotations - got %d, expected %d", len(target.Annotations), expectedAnnotationCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyConfigMapSignature(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
configMap *v1.ConfigMap
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
desc: "correct signature is acknowledged",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "le-config",
|
||||
Namespace: "le-namespace",
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
"label2": "value2",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"annotation1": "value1",
|
||||
"annotation2": "value2",
|
||||
constants.ComponentConfigHashAnnotationKey: "sha256:c8f8b724728a6d6684106e5e64e94ce811c9965d19dd44dd073cf86cf43bc238",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "wrong checksum leads to failure",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "le-config",
|
||||
Namespace: "le-namespace",
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
"label2": "value2",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"annotation1": "value1",
|
||||
"annotation2": "value2",
|
||||
constants.ComponentConfigHashAnnotationKey: "sha256:832cb34fc68fc370dd44dd91d5699c118ec49e46e5e6014866d6a827427b8f8c",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
desc: "missing signature means error",
|
||||
configMap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "le-config",
|
||||
Namespace: "le-namespace",
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
"label2": "value2",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"annotation1": "value1",
|
||||
"annotation2": "value2",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"bar": []byte("baz"),
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
result := VerifyConfigMapSignature(test.configMap)
|
||||
if result != !test.expectErr {
|
||||
t.Errorf("unexpected result - got %t, expected %t", result, !test.expectErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -61,6 +61,8 @@ func (h *handler) FromDocumentMap(docmap kubeadmapi.DocumentMap) (kubeadmapi.Com
			if err := cfg.Unmarshal(docmap); err != nil {
				return nil, err
			}
			// consider all successfully loaded configs from a document map as user supplied
			cfg.SetUserSupplied(true)
			return cfg, nil
		}
	}
@@ -89,7 +91,24 @@ func (h *handler) fromConfigMap(client clientset.Interface, cmName, cmKey string
		return nil, err
	}

	return h.FromDocumentMap(gvkmap)
	// If the checksum comes up neatly we assume the config was generated
	generatedConfig := VerifyConfigMapSignature(configMap)

	componentCfg, err := h.FromDocumentMap(gvkmap)
	if err != nil {
		// If the config was generated and we get UnsupportedConfigVersionError, we skip loading it.
		// This will force us to use the generated default current version (effectively regenerating the config with the current version).
		if _, ok := err.(*UnsupportedConfigVersionError); ok && generatedConfig {
			return nil, nil
		}
		return nil, err
	}

	if componentCfg != nil {
		componentCfg.SetUserSupplied(!generatedConfig)
	}

	return componentCfg, nil
}

// FromCluster loads a component from a config map in the cluster
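The net effect of the fromConfigMap logic above, summarized as a decision matrix (descriptive comments only, not code from the commit):

// Outcome of loading a component config from its ConfigMap:
//
//   checksum matches       + version supported    -> config loaded, IsUserSupplied() == false
//   checksum matches       + version unsupported  -> config discarded (nil, nil); kubeadm regenerates defaults
//   checksum missing/stale + version supported    -> config loaded, IsUserSupplied() == true
//   checksum missing/stale + version unsupported  -> UnsupportedConfigVersionError; manual migration required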
@@ -97,25 +116,60 @@ func (h *handler) FromCluster(clientset clientset.Interface, clusterCfg *kubeadm
	return h.fromCluster(h, clientset, clusterCfg)
}

// known holds the known component config handlers. Add new component configs here.
var known = []*handler{
	&kubeProxyHandler,
	&kubeletHandler,
}

// configBase is the base type for all component config implementations
type configBase struct {
	// GroupVersion holds the supported GroupVersion for the inheriting config
	GroupVersion schema.GroupVersion

	// userSupplied tells us if the config is user supplied (invalid checksum) or not
	userSupplied bool
}

func (cb *configBase) IsUserSupplied() bool {
	return cb.userSupplied
}

func (cb *configBase) SetUserSupplied(userSupplied bool) {
	cb.userSupplied = userSupplied
}

func (cb *configBase) DeepCopyInto(other *configBase) {
	*other = *cb
}

func cloneBytes(in []byte) []byte {
	out := make([]byte, len(in))
	copy(out, in)
	return out
}

// Marshal is an utility function, used by the component config support implementations to marshal a runtime.Object to YAML with the
// correct group and version
func (h *handler) Marshal(object runtime.Object) ([]byte, error) {
	return kubeadmutil.MarshalToYamlForCodecs(object, h.GroupVersion, Codecs)
func (cb *configBase) Marshal(object runtime.Object) ([]byte, error) {
	return kubeadmutil.MarshalToYamlForCodecs(object, cb.GroupVersion, Codecs)
}

// Unmarshal attempts to unmarshal a runtime.Object from a document map. If no object is found, no error is returned.
// If a matching group is found, but no matching version an error is returned indicating that users should do manual conversion.
func (h *handler) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) error {
func (cb *configBase) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) error {
	for gvk, yaml := range from {
		// If this is a different group, we ignore it
		if gvk.Group != h.GroupVersion.Group {
		if gvk.Group != cb.GroupVersion.Group {
			continue
		}

		// If this is the correct group, but different version, we return an error
		if gvk.Version != h.GroupVersion.Version {
			// TODO: Replace this with a special error type and make UX better around it
			return errors.Errorf("unexpected apiVersion %q, you may have to do manual conversion to %q and execute kubeadm again", gvk.GroupVersion(), h.GroupVersion)
		if gvk.Version != cb.GroupVersion.Version {
			return &UnsupportedConfigVersionError{
				OldVersion:     gvk.GroupVersion(),
				CurrentVersion: cb.GroupVersion,
				Document:       cloneBytes(yaml),
			}
		}

		// As long as we support only component configs with a single kind, this is allowed
@@ -125,12 +179,6 @@ func (h *handler) Unmarshal(from kubeadmapi.DocumentMap, into runtime.Object) er
	return nil
}

// known holds the known component config handlers. Add new component configs here.
var known = []*handler{
	&kubeProxyHandler,
	&kubeletHandler,
}

// ensureInitializedComponentConfigs is an utility func to initialize the ComponentConfigMap in ClusterConfiguration prior to possible writes to it
func ensureInitializedComponentConfigs(clusterCfg *kubeadmapi.ClusterConfiguration) {
	if clusterCfg.ComponentConfigs == nil {
@ -63,7 +63,11 @@ var kubeletHandler = handler{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
AddToScheme: kubeletconfig.AddToScheme,
|
||||
CreateEmpty: func() kubeadmapi.ComponentConfig {
|
||||
return &kubeletConfig{}
|
||||
return &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
},
|
||||
fromCluster: kubeletConfigFromCluster,
|
||||
}
|
||||
@ -81,21 +85,23 @@ func kubeletConfigFromCluster(h *handler, clientset clientset.Interface, cluster
|
||||
|
||||
// kubeletConfig implements the kubeadmapi.ComponentConfig interface for kubelet
|
||||
type kubeletConfig struct {
|
||||
configBase
|
||||
config kubeletconfig.KubeletConfiguration
|
||||
}
|
||||
|
||||
func (kc *kubeletConfig) DeepCopy() kubeadmapi.ComponentConfig {
|
||||
result := &kubeletConfig{}
|
||||
kc.configBase.DeepCopyInto(&result.configBase)
|
||||
kc.config.DeepCopyInto(&result.config)
|
||||
return result
|
||||
}
|
||||
|
||||
func (kc *kubeletConfig) Marshal() ([]byte, error) {
|
||||
return kubeletHandler.Marshal(&kc.config)
|
||||
return kc.configBase.Marshal(&kc.config)
|
||||
}
|
||||
|
||||
func (kc *kubeletConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error {
|
||||
return kubeletHandler.Unmarshal(docmap, &kc.config)
|
||||
return kc.configBase.Unmarshal(docmap, &kc.config)
|
||||
}
|
||||
|
||||
func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubeadmapi.APIEndpoint, nodeRegOpts *kubeadmapi.NodeRegistrationOptions) {
|
||||
|
@ -17,6 +17,8 @@ limitations under the License.
|
||||
package componentconfigs
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
@ -47,7 +49,15 @@ var kubeletMarshalCases = []struct {
|
||||
{
|
||||
name: "Empty config",
|
||||
obj: &kubeletConfig{
|
||||
config: kubeletconfig.KubeletConfiguration{},
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
},
|
||||
config: kubeletconfig.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletconfig.SchemeGroupVersion.String(),
|
||||
Kind: "KubeletConfiguration",
|
||||
},
|
||||
},
|
||||
},
|
||||
yaml: dedent.Dedent(`
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
@ -77,7 +87,14 @@ var kubeletMarshalCases = []struct {
|
||||
{
|
||||
name: "Non empty config",
|
||||
obj: &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
},
|
||||
config: kubeletconfig.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletconfig.SchemeGroupVersion.String(),
|
||||
Kind: "KubeletConfiguration",
|
||||
},
|
||||
Address: "1.2.3.4",
|
||||
Port: 12345,
|
||||
RotateCertificates: true,
|
||||
@ -138,17 +155,17 @@ func TestKubeletUnmarshal(t *testing.T) {
|
||||
t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err)
|
||||
}
|
||||
|
||||
got := &kubeletConfig{}
|
||||
got := &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
if err = got.Unmarshal(gvkmap); err != nil {
|
||||
t.Fatalf("unexpected failure of Unmarshal: %v", err)
|
||||
}
|
||||
|
||||
expected := test.obj.DeepCopy().(*kubeletConfig)
|
||||
expected.config.APIVersion = kubeletHandler.GroupVersion.String()
|
||||
expected.config.Kind = "KubeletConfiguration"
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got)
|
||||
if !reflect.DeepEqual(got, test.obj) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.obj, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -373,10 +390,19 @@ func TestKubeletDefault(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got := &kubeletConfig{}
|
||||
// This is the same for all test cases so we set it here
|
||||
expected := test.expected
|
||||
expected.configBase.GroupVersion = kubeletconfig.SchemeGroupVersion
|
||||
|
||||
got := &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
got.Default(&test.clusterCfg, &kubeadmapi.APIEndpoint{}, &kubeadmapi.NodeRegistrationOptions{})
|
||||
if !reflect.DeepEqual(got, &test.expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.expected, *got)
|
||||
|
||||
if !reflect.DeepEqual(got, &expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, *got)
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -408,14 +434,6 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub
|
||||
`),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "New kubelet version returns an error",
|
||||
in: dedent.Dedent(`
|
||||
apiVersion: kubelet.config.k8s.io/v1
|
||||
kind: KubeletConfiguration
|
||||
`),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Wrong kubelet kind returns an error",
|
||||
in: dedent.Dedent(`
|
||||
@ -434,6 +452,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub
|
||||
rotateCertificates: true
|
||||
`),
|
||||
out: &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletHandler.GroupVersion,
|
||||
userSupplied: true,
|
||||
},
|
||||
config: kubeletconfig.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletHandler.GroupVersion.String(),
|
||||
@ -458,6 +480,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub
|
||||
rotateCertificates: true
|
||||
`),
|
||||
out: &kubeletConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeletHandler.GroupVersion,
|
||||
userSupplied: true,
|
||||
},
|
||||
config: kubeletconfig.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletHandler.GroupVersion.String(),
|
||||
@ -492,8 +518,10 @@ func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kub
|
||||
} else {
|
||||
if test.out == nil {
|
||||
t.Errorf("unexpected result: %v", got)
|
||||
} else if !reflect.DeepEqual(test.out, got) {
|
||||
t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got)
|
||||
} else {
|
||||
if !reflect.DeepEqual(test.out, got) {
|
||||
t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -537,3 +565,71 @@ func TestKubeletFromCluster(t *testing.T) {
|
||||
return kubeletHandler.FromCluster(client, clusterCfg)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGeneratedKubeletFromCluster(t *testing.T) {
|
||||
testYAML := dedent.Dedent(`
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
kind: KubeletConfiguration
|
||||
address: 1.2.3.4
|
||||
port: 12345
|
||||
rotateCertificates: true
|
||||
`)
|
||||
testYAMLHash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(testYAML)))
|
||||
// The SHA256 sum of "The quick brown fox jumps over the lazy dog"
|
||||
const mismatchHash = "sha256:d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"
|
||||
tests := []struct {
|
||||
name string
|
||||
hash string
|
||||
userSupplied bool
|
||||
}{
|
||||
{
|
||||
name: "Matching hash means generated config",
|
||||
hash: testYAMLHash,
|
||||
},
|
||||
{
|
||||
name: "Missmatching hash means user supplied config",
|
||||
hash: mismatchHash,
|
||||
userSupplied: true,
|
||||
},
|
||||
{
|
||||
name: "No hash means user supplied config",
|
||||
userSupplied: true,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
clusterCfg := &kubeadmapi.ClusterConfiguration{
|
||||
KubernetesVersion: constants.CurrentKubernetesVersion.String(),
|
||||
}
|
||||
|
||||
k8sVersion := version.MustParseGeneric(clusterCfg.KubernetesVersion)
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: constants.GetKubeletConfigMapName(k8sVersion),
|
||||
Namespace: metav1.NamespaceSystem,
|
||||
},
|
||||
Data: map[string]string{
|
||||
constants.KubeletBaseConfigurationConfigMapKey: testYAML,
|
||||
},
|
||||
}
|
||||
|
||||
if test.hash != "" {
|
||||
configMap.Annotations = map[string]string{
|
||||
constants.ComponentConfigHashAnnotationKey: test.hash,
|
||||
}
|
||||
}
|
||||
|
||||
client := clientsetfake.NewSimpleClientset(configMap)
|
||||
cfg, err := kubeletHandler.FromCluster(client, clusterCfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected failure of FromCluster: %v", err)
|
||||
}
|
||||
|
||||
got := cfg.IsUserSupplied()
|
||||
if got != test.userSupplied {
|
||||
t.Fatalf("mismatch between expected and got:\n\tExpected: %t\n\tGot: %t", test.userSupplied, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -41,7 +41,11 @@ var kubeProxyHandler = handler{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
AddToScheme: kubeproxyconfig.AddToScheme,
|
||||
CreateEmpty: func() kubeadmapi.ComponentConfig {
|
||||
return &kubeProxyConfig{}
|
||||
return &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
},
|
||||
fromCluster: kubeProxyConfigFromCluster,
|
||||
}
|
||||
@ -52,21 +56,23 @@ func kubeProxyConfigFromCluster(h *handler, clientset clientset.Interface, _ *ku
|
||||
|
||||
// kubeProxyConfig implements the kubeadmapi.ComponentConfig interface for kube-proxy
|
||||
type kubeProxyConfig struct {
|
||||
configBase
|
||||
config kubeproxyconfig.KubeProxyConfiguration
|
||||
}
|
||||
|
||||
func (kp *kubeProxyConfig) DeepCopy() kubeadmapi.ComponentConfig {
|
||||
result := &kubeProxyConfig{}
|
||||
kp.configBase.DeepCopyInto(&result.configBase)
|
||||
kp.config.DeepCopyInto(&result.config)
|
||||
return result
|
||||
}
|
||||
|
||||
func (kp *kubeProxyConfig) Marshal() ([]byte, error) {
|
||||
return kubeProxyHandler.Marshal(&kp.config)
|
||||
return kp.configBase.Marshal(&kp.config)
|
||||
}
|
||||
|
||||
func (kp *kubeProxyConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error {
|
||||
return kubeProxyHandler.Unmarshal(docmap, &kp.config)
|
||||
return kp.configBase.Unmarshal(docmap, &kp.config)
|
||||
}
|
||||
|
||||
func kubeProxyDefaultBindAddress(localAdvertiseAddress string) string {
|
||||
|
@ -17,6 +17,8 @@ limitations under the License.
|
||||
package componentconfigs
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -45,7 +47,15 @@ var kubeProxyMarshalCases = []struct {
|
||||
{
|
||||
name: "Empty config",
|
||||
obj: &kubeProxyConfig{
|
||||
config: kubeproxyconfig.KubeProxyConfiguration{},
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
},
|
||||
config: kubeproxyconfig.KubeProxyConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeproxyconfig.SchemeGroupVersion.String(),
|
||||
Kind: "KubeProxyConfiguration",
|
||||
},
|
||||
},
|
||||
},
|
||||
yaml: dedent.Dedent(`
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
@ -99,7 +109,14 @@ var kubeProxyMarshalCases = []struct {
|
||||
{
|
||||
name: "Non empty config",
|
||||
obj: &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
},
|
||||
config: kubeproxyconfig.KubeProxyConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeproxyconfig.SchemeGroupVersion.String(),
|
||||
Kind: "KubeProxyConfiguration",
|
||||
},
|
||||
BindAddress: "1.2.3.4",
|
||||
EnableProfiling: true,
|
||||
},
|
||||
@ -180,17 +197,17 @@ func TestKubeProxyUnmarshal(t *testing.T) {
|
||||
t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err)
|
||||
}
|
||||
|
||||
got := &kubeProxyConfig{}
|
||||
got := &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
if err = got.Unmarshal(gvkmap); err != nil {
|
||||
t.Fatalf("unexpected failure of Unmarshal: %v", err)
|
||||
}
|
||||
|
||||
expected := test.obj.DeepCopy().(*kubeProxyConfig)
|
||||
expected.config.APIVersion = kubeProxyHandler.GroupVersion.String()
|
||||
expected.config.Kind = "KubeProxyConfiguration"
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got)
|
||||
if !reflect.DeepEqual(got, test.obj) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.obj, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -296,10 +313,18 @@ func TestKubeProxyDefault(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got := &kubeProxyConfig{}
|
||||
// This is the same for all test cases so we set it here
|
||||
expected := test.expected
|
||||
expected.configBase.GroupVersion = kubeproxyconfig.SchemeGroupVersion
|
||||
|
||||
got := &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeproxyconfig.SchemeGroupVersion,
|
||||
},
|
||||
}
|
||||
got.Default(&test.clusterCfg, &test.endpoint, &kubeadmapi.NodeRegistrationOptions{})
|
||||
if !reflect.DeepEqual(got, &test.expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.expected, got)
|
||||
if !reflect.DeepEqual(got, &expected) {
|
||||
t.Fatalf("Missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -331,14 +356,6 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k
|
||||
`),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "New kube-proxy version returns an error",
|
||||
in: dedent.Dedent(`
|
||||
apiVersion: kubeproxy.config.k8s.io/v1beta1
|
||||
kind: KubeProxyConfiguration
|
||||
`),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Wrong kube-proxy kind returns an error",
|
||||
in: dedent.Dedent(`
|
||||
@ -356,6 +373,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k
|
||||
enableProfiling: true
|
||||
`),
|
||||
out: &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeProxyHandler.GroupVersion,
|
||||
userSupplied: true,
|
||||
},
|
||||
config: kubeproxyconfig.KubeProxyConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeProxyHandler.GroupVersion.String(),
|
||||
@ -378,6 +399,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k
|
||||
enableProfiling: true
|
||||
`),
|
||||
out: &kubeProxyConfig{
|
||||
configBase: configBase{
|
||||
GroupVersion: kubeProxyHandler.GroupVersion,
|
||||
userSupplied: true,
|
||||
},
|
||||
config: kubeproxyconfig.KubeProxyConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeProxyHandler.GroupVersion.String(),
|
||||
@ -411,8 +436,10 @@ func runKubeProxyFromTest(t *testing.T, perform func(t *testing.T, in string) (k
|
||||
} else {
|
||||
if test.out == nil {
|
||||
t.Errorf("unexpected result: %v", got)
|
||||
} else if !reflect.DeepEqual(test.out, got) {
|
||||
t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got)
|
||||
} else {
|
||||
if !reflect.DeepEqual(test.out, got) {
|
||||
t.Errorf("missmatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -450,3 +477,64 @@ func TestKubeProxyFromCluster(t *testing.T) {
|
||||
return kubeProxyHandler.FromCluster(client, &kubeadmapi.ClusterConfiguration{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestGeneratedKubeProxyFromCluster(t *testing.T) {
|
||||
testYAML := dedent.Dedent(`
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
kind: KubeProxyConfiguration
|
||||
bindAddress: 1.2.3.4
|
||||
enableProfiling: true
|
||||
`)
|
||||
testYAMLHash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(testYAML)))
|
||||
// The SHA256 sum of "The quick brown fox jumps over the lazy dog"
|
||||
const mismatchHash = "sha256:d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"
|
||||
tests := []struct {
|
||||
name string
|
||||
hash string
|
||||
userSupplied bool
|
||||
}{
|
||||
{
|
||||
name: "Matching hash means generated config",
|
||||
hash: testYAMLHash,
|
||||
},
|
||||
{
|
||||
name: "Missmatching hash means user supplied config",
|
||||
hash: mismatchHash,
|
||||
userSupplied: true,
|
||||
},
|
||||
{
|
||||
name: "No hash means user supplied config",
|
||||
userSupplied: true,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: constants.KubeProxyConfigMap,
|
||||
Namespace: metav1.NamespaceSystem,
|
||||
},
|
||||
Data: map[string]string{
|
||||
constants.KubeProxyConfigMapKey: testYAML,
|
||||
},
|
||||
}
|
||||
|
||||
if test.hash != "" {
|
||||
configMap.Annotations = map[string]string{
|
||||
constants.ComponentConfigHashAnnotationKey: test.hash,
|
||||
}
|
||||
}
|
||||
|
||||
client := clientsetfake.NewSimpleClientset(configMap)
|
||||
cfg, err := kubeProxyHandler.FromCluster(client, &kubeadmapi.ClusterConfiguration{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected failure of FromCluster: %v", err)
|
||||
}
|
||||
|
||||
got := cfg.IsUserSupplied()
|
||||
if got != test.userSupplied {
|
||||
t.Fatalf("mismatch between expected and got:\n\tExpected: %t\n\tGot: %t", test.userSupplied, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -17,9 +17,29 @@ limitations under the License.
package componentconfigs

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"
)

// UnsupportedConfigVersionError is a special error type returned whenever we encounter too old config version
type UnsupportedConfigVersionError struct {
	// OldVersion is the config version that is causing the problem
	OldVersion schema.GroupVersion

	// CurrentVersion describes the natively supported config version
	CurrentVersion schema.GroupVersion

	// Document points to the YAML/JSON document that caused the problem
	Document []byte
}

// Error implements the standard Golang error interface for UnsupportedConfigVersionError
func (err *UnsupportedConfigVersionError) Error() string {
	return fmt.Sprintf("unsupported apiVersion %q, you may have to do manual conversion to %q and run kubeadm again", err.OldVersion, err.CurrentVersion)
}

// warnDefaultComponentConfigValue prints a warning if the user modified a field in a certain
// CompomentConfig from the default recommended value in kubeadm.
func warnDefaultComponentConfigValue(componentConfigKind, paramName string, defaultValue, userValue interface{}) {
@@ -381,6 +381,9 @@ const (
	// KubeAPIServerAdvertiseAddressEndpointAnnotationKey is the annotation key on every apiserver pod,
	// describing the API endpoint (advertise address and bind port of the api server)
	KubeAPIServerAdvertiseAddressEndpointAnnotationKey = "kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint"
	// ComponentConfigHashAnnotationKey holds the config map annotation key that kubeadm uses to store
	// a SHA256 sum to check for user changes
	ComponentConfigHashAnnotationKey = "kubeadm.kubernetes.io/component-config.hash"

	// ControlPlaneTier is the value used in the tier label to identify control plane components
	ControlPlaneTier = "control-plane"
@ -51,50 +51,14 @@ func EnsureProxyAddon(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubea
|
||||
return errors.Wrap(err, "error when creating kube-proxy service account")
|
||||
}
|
||||
|
||||
// Generate ControlPlane Enpoint kubeconfig file
|
||||
controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, localEndpoint)
|
||||
if err != nil {
|
||||
if err := createKubeProxyConfigMap(cfg, localEndpoint, client); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kubeProxyCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeProxyGroup]
|
||||
if !ok {
|
||||
return errors.New("no kube-proxy component config found in the active component config set")
|
||||
}
|
||||
|
||||
proxyBytes, err := kubeProxyCfg.Marshal()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when marshaling")
|
||||
}
|
||||
var prefixBytes bytes.Buffer
|
||||
apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ")
|
||||
var proxyConfigMapBytes, proxyDaemonSetBytes []byte
|
||||
proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
|
||||
struct {
|
||||
ControlPlaneEndpoint string
|
||||
ProxyConfig string
|
||||
ProxyConfigMap string
|
||||
ProxyConfigMapKey string
|
||||
}{
|
||||
ControlPlaneEndpoint: controlPlaneEndpoint,
|
||||
ProxyConfig: prefixBytes.String(),
|
||||
ProxyConfigMap: constants.KubeProxyConfigMap,
|
||||
ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when parsing kube-proxy configmap template")
|
||||
}
|
||||
proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ Image, ProxyConfigMap, ProxyConfigMapKey string }{
|
||||
Image: images.GetKubernetesImage(constants.KubeProxy, cfg),
|
||||
ProxyConfigMap: constants.KubeProxyConfigMap,
|
||||
ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when parsing kube-proxy daemonset template")
|
||||
}
|
||||
if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil {
|
||||
if err := createKubeProxyAddon(cfg, client); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := CreateRBACRules(client); err != nil {
|
||||
return errors.Wrap(err, "error when creating kube-proxy RBAC rules")
|
||||
}
|
||||
@ -119,15 +83,61 @@ func CreateRBACRules(client clientset.Interface) error {
|
||||
return createClusterRoleBindings(client)
|
||||
}
|
||||
|
||||
func createKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client clientset.Interface) error {
|
||||
func createKubeProxyConfigMap(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubeadmapi.APIEndpoint, client clientset.Interface) error {
|
||||
// Generate ControlPlane Enpoint kubeconfig file
|
||||
controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, localEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kubeProxyCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeProxyGroup]
|
||||
if !ok {
|
||||
return errors.New("no kube-proxy component config found in the active component config set")
|
||||
}
|
||||
|
||||
proxyBytes, err := kubeProxyCfg.Marshal()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when marshaling")
|
||||
}
|
||||
var prefixBytes bytes.Buffer
|
||||
apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ")
|
||||
configMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
|
||||
struct {
|
||||
ControlPlaneEndpoint string
|
||||
ProxyConfig string
|
||||
ProxyConfigMap string
|
||||
ProxyConfigMapKey string
|
||||
}{
|
||||
ControlPlaneEndpoint: controlPlaneEndpoint,
|
||||
ProxyConfig: prefixBytes.String(),
|
||||
ProxyConfigMap: constants.KubeProxyConfigMap,
|
||||
ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when parsing kube-proxy configmap template")
|
||||
}
|
||||
|
||||
kubeproxyConfigMap := &v1.ConfigMap{}
|
||||
if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
|
||||
return errors.Wrap(err, "unable to decode kube-proxy configmap")
|
||||
}
|
||||
|
||||
if !kubeProxyCfg.IsUserSupplied() {
|
||||
componentconfigs.SignConfigMap(kubeproxyConfigMap)
|
||||
}
|
||||
|
||||
// Create the ConfigMap for kube-proxy or update it in case it already exists
|
||||
if err := apiclient.CreateOrUpdateConfigMap(client, kubeproxyConfigMap); err != nil {
|
||||
return err
|
||||
return apiclient.CreateOrUpdateConfigMap(client, kubeproxyConfigMap)
|
||||
}
|
||||
|
||||
func createKubeProxyAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
|
||||
daemonSetbytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ Image, ProxyConfigMap, ProxyConfigMapKey string }{
|
||||
Image: images.GetKubernetesImage(constants.KubeProxy, cfg),
|
||||
ProxyConfigMap: constants.KubeProxyConfigMap,
|
||||
ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error when parsing kube-proxy daemonset template")
|
||||
}
|
||||
|
||||
kubeproxyDaemonSet := &apps.DaemonSet{}
|
||||
|
@@ -40,7 +40,7 @@ import (
func WriteConfigToDisk(cfg *kubeadmapi.ClusterConfiguration, kubeletDir string) error {
	kubeletCfg, ok := cfg.ComponentConfigs[componentconfigs.KubeletGroup]
	if !ok {
		return errors.New("no kubelet component config found in the active component config set")
		return errors.New("no kubelet component config found")
	}

	kubeletBytes, err := kubeletCfg.Marshal()
@@ -73,7 +73,7 @@ func CreateConfigMap(cfg *kubeadmapi.ClusterConfiguration, client clientset.Inte
		return err
	}

	if err := apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
	configMap := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configMapName,
			Namespace: metav1.NamespaceSystem,
@@ -81,7 +81,13 @@ func CreateConfigMap(cfg *kubeadmapi.ClusterConfiguration, client clientset.Inte
		Data: map[string]string{
			kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes),
		},
	}); err != nil {
	}

	if !kubeletCfg.IsUserSupplied() {
		componentconfigs.SignConfigMap(configMap)
	}

	if err := apiclient.CreateOrUpdateConfigMap(client, configMap); err != nil {
		return err
	}