diff --git a/cmd/kubeadm/app/apis/kubeadm/BUILD b/cmd/kubeadm/app/apis/kubeadm/BUILD index b1516565a46..ef58b5ddc6c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -17,6 +17,7 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", deps = [ "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD b/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD index 52582b01b69..9adebea330c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index dcf554fbabb..3d2c62959f5 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -25,6 +25,7 @@ import ( runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) @@ -76,6 +77,41 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { }, } kubeletconfigv1alpha1.SetDefaults_KubeletConfiguration(obj.KubeletConfiguration.BaseConfig) + obj.KubeProxy = kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + FeatureGates: "foo", + BindAddress: "foo", + HealthzBindAddress: "foo:10256", + MetricsBindAddress: "foo:", + EnableProfiling: bool(true), + ClusterCIDR: "foo", + HostnameOverride: "foo", + ClientConnection: kubeproxyconfigv1alpha1.ClientConnectionConfiguration{ + KubeConfigFile: "foo", + AcceptContentTypes: "foo", + ContentType: "foo", + QPS: float32(5), + Burst: 10, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 1}, + }, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeBit: utilpointer.Int32Ptr(0), + SyncPeriod: metav1.Duration{Duration: 1}, + }, + OOMScoreAdj: utilpointer.Int32Ptr(0), + ResourceContainer: "foo", + UDPIdleTimeout: metav1.Duration{Duration: 1}, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + MaxPerCore: utilpointer.Int32Ptr(2), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5}, + }, + ConfigSyncPeriod: metav1.Duration{Duration: 1}, + }, + } }, func(obj *kubeadm.NodeConfiguration, c fuzz.Continue) { c.FuzzNoCustom(obj) diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index b02c4f55dd6..659a8a5442b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -19,6 +19,7 @@ package kubeadm 
import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -29,6 +30,7 @@ type MasterConfiguration struct { metav1.TypeMeta API API + KubeProxy KubeProxy Etcd Etcd KubeletConfiguration KubeletConfiguration Networking Networking @@ -173,3 +175,8 @@ type HostPathMount struct { HostPath string MountPath string } + +// KubeProxy contains elements describing the proxy configuration +type KubeProxy struct { + Config *kubeproxyconfigv1alpha1.KubeProxyConfiguration +} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD index 19172f675f2..70c1dee7788 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -23,10 +18,15 @@ go_library( "//conditions:default": [], }), importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1", + visibility = ["//visibility:public"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/features:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -46,4 +46,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index f0020b84643..88581a1dc0a 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -23,7 +23,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/features" + kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) @@ -57,6 +61,10 @@ const ( DefaultEtcdCertDir = "/etc/kubernetes/pki/etcd" // DefaultEtcdClusterServiceName is the default name of the service backing the etcd cluster DefaultEtcdClusterServiceName = "etcd-cluster" + // DefaultProxyBindAddressv4 is the default bind address when the advertise address is v4 + DefaultProxyBindAddressv4 = "0.0.0.0" + // DefaultProxyBindAddressv6 is the default bind address when the advertise address is v6 + DefaultProxyBindAddressv6 = "::" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -104,7 +112,22 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { } 
SetDefaultsEtcdSelfHosted(obj) - SetDefaults_KubeletConfiguration(obj) + if features.Enabled(obj.FeatureGates, features.DynamicKubeletConfig) { + SetDefaults_KubeletConfiguration(obj) + } + SetDefaults_ProxyConfiguration(obj) +} + +// SetDefaults_ProxyConfiguration assigns default values for the Proxy +func SetDefaults_ProxyConfiguration(obj *MasterConfiguration) { + if obj.KubeProxy.Config == nil { + obj.KubeProxy.Config = &kubeproxyconfigv1alpha1.KubeProxyConfiguration{} + } + if obj.KubeProxy.Config.ClusterCIDR == "" && obj.Networking.PodSubnet != "" { + obj.KubeProxy.Config.ClusterCIDR = obj.Networking.PodSubnet + } + + kubeproxyscheme.Scheme.Default(obj.KubeProxy.Config) } // SetDefaults_NodeConfiguration assigns default values to a regular node @@ -181,4 +204,9 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { if obj.KubeletConfiguration.BaseConfig.CAdvisorPort == nil { obj.KubeletConfiguration.BaseConfig.CAdvisorPort = utilpointer.Int32Ptr(0) } + + scheme, _, _ := kubeletscheme.NewSchemeAndCodecs() + if scheme != nil { + scheme.Default(obj.KubeletConfiguration.BaseConfig) + } } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go index 06ea582386d..349dde5510f 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -29,6 +30,7 @@ type MasterConfiguration struct { metav1.TypeMeta `json:",inline"` API API `json:"api"` + KubeProxy KubeProxy `json:"kubeProxy"` Etcd Etcd `json:"etcd"` KubeletConfiguration KubeletConfiguration `json:"kubeletConfiguration"` Networking Networking `json:"networking"` @@ -155,3 +157,8 @@ type HostPathMount struct { HostPath string `json:"hostPath"` MountPath string `json:"mountPath"` } + +// KubeProxy contains elements describing the proxy configuration +type KubeProxy struct { + Config *kubeproxyconfigv1alpha1.KubeProxyConfiguration `json:"config,omitempty"` +} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 13d39b25874..5cebdd5d5bc 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -26,6 +26,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeletconfig_v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfig_v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" unsafe "unsafe" ) @@ -43,6 +44,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_kubeadm_Etcd_To_v1alpha1_Etcd, Convert_v1alpha1_HostPathMount_To_kubeadm_HostPathMount, Convert_kubeadm_HostPathMount_To_v1alpha1_HostPathMount, + Convert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy, + Convert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy, Convert_v1alpha1_KubeletConfiguration_To_kubeadm_KubeletConfiguration, Convert_kubeadm_KubeletConfiguration_To_v1alpha1_KubeletConfiguration, Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration, @@ -138,6 +141,26 @@ func Convert_kubeadm_HostPathMount_To_v1alpha1_HostPathMount(in 
*kubeadm.HostPat return autoConvert_kubeadm_HostPathMount_To_v1alpha1_HostPathMount(in, out, s) } +func autoConvert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy(in *KubeProxy, out *kubeadm.KubeProxy, s conversion.Scope) error { + out.Config = (*kubeproxyconfig_v1alpha1.KubeProxyConfiguration)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy is an autogenerated conversion function. +func Convert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy(in *KubeProxy, out *kubeadm.KubeProxy, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy(in, out, s) +} + +func autoConvert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy(in *kubeadm.KubeProxy, out *KubeProxy, s conversion.Scope) error { + out.Config = (*kubeproxyconfig_v1alpha1.KubeProxyConfiguration)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy is an autogenerated conversion function. +func Convert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy(in *kubeadm.KubeProxy, out *KubeProxy, s conversion.Scope) error { + return autoConvert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy(in, out, s) +} + func autoConvert_v1alpha1_KubeletConfiguration_To_kubeadm_KubeletConfiguration(in *KubeletConfiguration, out *kubeadm.KubeletConfiguration, s conversion.Scope) error { out.BaseConfig = (*kubeletconfig_v1alpha1.KubeletConfiguration)(unsafe.Pointer(in.BaseConfig)) return nil @@ -162,6 +185,9 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in if err := Convert_v1alpha1_API_To_kubeadm_API(&in.API, &out.API, s); err != nil { return err } + if err := Convert_v1alpha1_KubeProxy_To_kubeadm_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil { + return err + } if err := Convert_v1alpha1_Etcd_To_kubeadm_Etcd(&in.Etcd, &out.Etcd, s); err != nil { return err } @@ -200,6 +226,9 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in if err := Convert_kubeadm_API_To_v1alpha1_API(&in.API, &out.API, s); err != nil { return err } + if err := Convert_kubeadm_KubeProxy_To_v1alpha1_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil { + return err + } if err := Convert_kubeadm_Etcd_To_v1alpha1_Etcd(&in.Etcd, &out.Etcd, s); err != nil { return err } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go index 1fbc3767d13..444eb489fe0 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" kubeletconfig_v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfig_v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +96,31 @@ func (in *HostPathMount) DeepCopy() *HostPathMount { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeProxy) DeepCopyInto(out *KubeProxy) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(kubeproxyconfig_v1alpha1.KubeProxyConfiguration) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxy. +func (in *KubeProxy) DeepCopy() *KubeProxy { + if in == nil { + return nil + } + out := new(KubeProxy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { *out = *in @@ -125,6 +151,7 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { *out = *in out.TypeMeta = in.TypeMeta out.API = in.API + in.KubeProxy.DeepCopyInto(&out.KubeProxy) in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go index 6029af668dc..098153aa823 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go @@ -23,6 +23,7 @@ package v1alpha1 import ( runtime "k8s.io/apimachinery/pkg/runtime" kubeletconfig_v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfig_v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -36,6 +37,9 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_MasterConfiguration(in *MasterConfiguration) { SetDefaults_MasterConfiguration(in) + if in.KubeProxy.Config != nil { + kubeproxyconfig_v1alpha1.SetDefaults_KubeProxyConfiguration(in.KubeProxy.Config) + } if in.KubeletConfiguration.BaseConfig != nil { kubeletconfig_v1alpha1.SetDefaults_KubeletConfiguration(in.KubeletConfiguration.BaseConfig) } diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD index ad9b8936a96..75cbf735129 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD @@ -1,9 +1,31 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation", + visibility = ["//visibility:public"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/features:go_default_library", + "//cmd/kubeadm/app/util:go_default_library", + "//cmd/kubeadm/app/util/token:go_default_library", + "//pkg/apis/core/validation:go_default_library", + "//pkg/kubeapiserver/authorizer/modes:go_default_library", + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/validation:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/validation:go_default_library", + 
"//pkg/registry/core/service/ipallocator:go_default_library", + "//pkg/util/node:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], ) go_test( @@ -13,28 +35,11 @@ go_test( library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/util/pointer:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation", - deps = [ - "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/constants:go_default_library", - "//cmd/kubeadm/app/features:go_default_library", - "//cmd/kubeadm/app/util:go_default_library", - "//cmd/kubeadm/app/util/token:go_default_library", - "//pkg/apis/core/validation:go_default_library", - "//pkg/kubeapiserver/authorizer/modes:go_default_library", - "//pkg/registry/core/service/ipallocator:go_default_library", - "//pkg/util/node:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -50,4 +55,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index 44c7c8dd9f6..f7b9ab21072 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -36,6 +36,12 @@ import ( tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" + kubeletvalidation "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" + kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" + proxyvalidation "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/node" ) @@ -71,9 +77,27 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...) allErrs = append(allErrs, ValidateFeatureGates(c.FeatureGates, field.NewPath("feature-gates"))...) allErrs = append(allErrs, ValidateAPIEndpoint(c, field.NewPath("api-endpoint"))...) + allErrs = append(allErrs, ValidateProxy(c, field.NewPath("kube-proxy"))...) 
+ if features.Enabled(c.FeatureGates, features.DynamicKubeletConfig) { + allErrs = append(allErrs, ValidateKubeletConfiguration(&c.KubeletConfiguration, field.NewPath("kubeletConfiguration"))...) + } return allErrs } +// ValidateProxy validates proxy configuration and collects all encountered errors +func ValidateProxy(c *kubeadm.MasterConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // Convert to the internal version + internalcfg := &kubeproxyconfig.KubeProxyConfiguration{} + err := kubeproxyscheme.Scheme.Convert(c.KubeProxy.Config, internalcfg, nil) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "KubeProxy.Config", err.Error())) + return allErrs + } + return proxyvalidation.Validate(internalcfg) +} + // ValidateNodeConfiguration validates node configuration and collects all encountered errors func ValidateNodeConfiguration(c *kubeadm.NodeConfiguration) field.ErrorList { allErrs := field.ErrorList{} @@ -351,3 +375,29 @@ func ValidateIgnorePreflightErrors(ignorePreflightErrors []string, skipPreflight return ignoreErrors, allErrs.ToAggregate() } + +// ValidateKubeletConfiguration validates kubelet configuration and collects all encountered errors +func ValidateKubeletConfiguration(c *kubeadm.KubeletConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + scheme, _, err := kubeletscheme.NewSchemeAndCodecs() + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "kubeletConfiguration", err.Error())) + return allErrs + } + + // Convert versioned config to internal config + internalcfg := &kubeletconfig.KubeletConfiguration{} + err = scheme.Convert(c.BaseConfig, internalcfg, nil) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "kubeletConfiguration", err.Error())) + return allErrs + } + + err = kubeletvalidation.ValidateKubeletConfiguration(internalcfg) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "kubeletConfiguration", err.Error())) + } + + return allErrs +} diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 988fbb1b327..3027970c509 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -18,11 +18,16 @@ package validation import ( "testing" + "time" "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) func TestValidateTokenDiscovery(t *testing.T) { @@ -331,6 +336,32 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "192.168.59.103", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + 
IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", @@ -346,6 +377,32 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1:2:3::4", BindPort: 3446, }, + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "192.168.59.103", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "2001:db8::1/98", @@ -487,3 +544,65 @@ func TestValidateIgnorePreflightErrors(t *testing.T) { } } } + +func TestValidateKubeletConfiguration(t *testing.T) { + successCase := &kubeadm.KubeletConfiguration{ + BaseConfig: &kubeletconfigv1alpha1.KubeletConfiguration{ + CgroupsPerQOS: utilpointer.BoolPtr(true), + EnforceNodeAllocatable: []string{"pods", "system-reserved", "kube-reserved"}, + SystemCgroups: "", + CgroupRoot: "", + CAdvisorPort: utilpointer.Int32Ptr(0), + EventBurst: 10, + EventRecordQPS: utilpointer.Int32Ptr(5), + HealthzPort: utilpointer.Int32Ptr(10248), + ImageGCHighThresholdPercent: utilpointer.Int32Ptr(85), + ImageGCLowThresholdPercent: utilpointer.Int32Ptr(80), + IPTablesDropBit: utilpointer.Int32Ptr(15), + IPTablesMasqueradeBit: utilpointer.Int32Ptr(14), + KubeAPIBurst: 10, + KubeAPIQPS: utilpointer.Int32Ptr(5), + MaxOpenFiles: 1000000, + MaxPods: 110, + OOMScoreAdj: utilpointer.Int32Ptr(-999), + PodsPerCore: 100, + Port: 65535, + ReadOnlyPort: utilpointer.Int32Ptr(0), + RegistryBurst: 10, + RegistryPullQPS: utilpointer.Int32Ptr(5), + }, + } + if allErrors := ValidateKubeletConfiguration(successCase, nil); len(allErrors) != 0 { + t.Errorf("failed ValidateKubeletConfiguration: expect no errors but got %v", allErrors) + } + + errorCase := &kubeadm.KubeletConfiguration{ + BaseConfig: &kubeletconfigv1alpha1.KubeletConfiguration{ + CgroupsPerQOS: utilpointer.BoolPtr(false), + EnforceNodeAllocatable: []string{"pods", "system-reserved", "kube-reserved", "illegal-key"}, + SystemCgroups: "/", + CgroupRoot: "", + CAdvisorPort: utilpointer.Int32Ptr(-10), + EventBurst: -10, + EventRecordQPS: 
utilpointer.Int32Ptr(-10), + HealthzPort: utilpointer.Int32Ptr(-10), + ImageGCHighThresholdPercent: utilpointer.Int32Ptr(101), + ImageGCLowThresholdPercent: utilpointer.Int32Ptr(101), + IPTablesDropBit: utilpointer.Int32Ptr(-10), + IPTablesMasqueradeBit: utilpointer.Int32Ptr(-10), + KubeAPIBurst: -10, + KubeAPIQPS: utilpointer.Int32Ptr(-10), + MaxOpenFiles: -10, + MaxPods: -10, + OOMScoreAdj: utilpointer.Int32Ptr(-1001), + PodsPerCore: -10, + Port: 0, + ReadOnlyPort: utilpointer.Int32Ptr(-10), + RegistryBurst: -10, + RegistryPullQPS: utilpointer.Int32Ptr(-10), + }, + } + if allErrors := ValidateKubeletConfiguration(errorCase, nil); len(allErrors) == 0 { + t.Errorf("failed ValidateKubeletConfiguration: expect errors but got no error") + } +} diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index 3a50f14d66f..5c574f5f027 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -23,7 +23,8 @@ package kubeadm import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + kubeletconfig_v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" + v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +96,31 @@ func (in *HostPathMount) DeepCopy() *HostPathMount { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxy) DeepCopyInto(out *KubeProxy) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(v1alpha1.KubeProxyConfiguration) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxy. +func (in *KubeProxy) DeepCopy() *KubeProxy { + if in == nil { + return nil + } + out := new(KubeProxy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { *out = *in @@ -103,7 +129,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { if *in == nil { *out = nil } else { - *out = new(v1alpha1.KubeletConfiguration) + *out = new(kubeletconfig_v1alpha1.KubeletConfiguration) (*in).DeepCopyInto(*out) } } @@ -125,6 +151,7 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { *out = *in out.TypeMeta = in.TypeMeta out.API = in.API + in.KubeProxy.DeepCopyInto(&out.KubeProxy) in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index e6a24c5440d..fdb4f4f240b 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -119,7 +119,7 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command { func RunApply(flags *applyFlags) error { // Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap) - upgradeVars, err := enforceRequirements(flags.parent.featureGatesString, flags.parent.kubeConfigPath, flags.parent.cfgPath, flags.parent.printConfig, flags.dryRun) + upgradeVars, err := enforceRequirements(flags.parent.featureGatesString, flags.parent.kubeConfigPath, flags.parent.cfgPath, flags.parent.printConfig, flags.dryRun, flags.parent.ignorePreflightErrorsSet) if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/upgrade/common.go b/cmd/kubeadm/app/cmd/upgrade/common.go index 1cadd65940c..2fbbf08f510 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common.go +++ b/cmd/kubeadm/app/cmd/upgrade/common.go @@ -48,14 +48,14 @@ type upgradeVariables struct { } // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure -func enforceRequirements(featureGatesString, kubeConfigPath, cfgPath string, printConfig, dryRun bool) (*upgradeVariables, error) { +func enforceRequirements(featureGatesString, kubeConfigPath, cfgPath string, printConfig, dryRun bool, ignoreChecksErrors sets.String) (*upgradeVariables, error) { client, err := getClient(kubeConfigPath, dryRun) if err != nil { return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", kubeConfigPath, err) } // Run healthchecks against the cluster - if err := upgrade.CheckClusterHealth(client); err != nil { + if err := upgrade.CheckClusterHealth(client, ignoreChecksErrors); err != nil { return nil, fmt.Errorf("[upgrade/health] FATAL: %v", err) } diff --git a/cmd/kubeadm/app/cmd/upgrade/common_test.go b/cmd/kubeadm/app/cmd/upgrade/common_test.go index 6074c03e2ff..e7105798970 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/common_test.go @@ -51,6 +51,7 @@ func TestPrintConfiguration(t *testing.T) { image: "" keyFile: "" imageRepository: "" + kubeProxy: {} kubeletConfiguration: baseConfig: null kubernetesVersion: v1.7.1 @@ -84,6 +85,7 @@ func TestPrintConfiguration(t *testing.T) { image: "" keyFile: "" imageRepository: "" + kubeProxy: {} kubeletConfiguration: baseConfig: null kubernetesVersion: v1.7.1 @@ -127,6 +129,7 @@ func TestPrintConfiguration(t *testing.T) { etcdVersion: v0.1.0 operatorVersion: v0.1.0 imageRepository: "" + kubeProxy: {} kubeletConfiguration: baseConfig: null kubernetesVersion: v1.7.1 diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index 
6d7cee8317e..61e0b4376a6 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -55,7 +55,7 @@ func NewCmdPlan(parentFlags *cmdUpgradeFlags) *cobra.Command { // RunPlan takes care of outputting available versions to upgrade to for the user func RunPlan(parentFlags *cmdUpgradeFlags) error { // Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never set dry-run for plan. - upgradeVars, err := enforceRequirements(parentFlags.featureGatesString, parentFlags.kubeConfigPath, parentFlags.cfgPath, parentFlags.printConfig, false) + upgradeVars, err := enforceRequirements(parentFlags.featureGatesString, parentFlags.kubeConfigPath, parentFlags.cfgPath, parentFlags.printConfig, false, parentFlags.ignorePreflightErrorsSet) if err != nil { return err } diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 79d4c0b3fa9..4cf9076eb9a 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -202,7 +202,6 @@ const ( ) var ( - // MasterTaint is the taint to apply on the PodSpec for being able to run that Pod on the master MasterTaint = v1.Taint{ Key: LabelNodeRoleMaster, @@ -232,6 +231,9 @@ var ( // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports MinimumKubeletVersion = version.MustParseSemantic("v1.8.0") + // MinimumKubeProxyComponentConfigVersion specifies the minimum version for the kubeProxyComponent + MinimumKubeProxyComponentConfigVersion = version.MustParseSemantic("v1.9.0-alpha.3") + // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases SupportedEtcdVersion = map[uint8]string{ 8: "3.0.17", diff --git a/cmd/kubeadm/app/features/features.go b/cmd/kubeadm/app/features/features.go index 3c65cad78e1..2a9fa52ac78 100644 --- a/cmd/kubeadm/app/features/features.go +++ b/cmd/kubeadm/app/features/features.go @@ -39,9 +39,6 @@ const ( // StoreCertsInSecrets is alpha in v1.8 StoreCertsInSecrets = "StoreCertsInSecrets" - // SupportIPVSProxyMode is alpha in v1.8 - SupportIPVSProxyMode = "SupportIPVSProxyMode" - // DynamicKubeletConfig is alpha in v1.9 DynamicKubeletConfig = "DynamicKubeletConfig" ) @@ -53,7 +50,6 @@ var InitFeatureGates = FeatureList{ SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}}, StoreCertsInSecrets: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}}, HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, - SupportIPVSProxyMode: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, CoreDNS: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, DynamicKubeletConfig: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, } diff --git a/cmd/kubeadm/app/phases/addons/proxy/BUILD b/cmd/kubeadm/app/phases/addons/proxy/BUILD index 68190b15986..6c0a1f8b0d5 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/BUILD +++ b/cmd/kubeadm/app/phases/addons/proxy/BUILD @@ -12,9 +12,14 @@ go_test( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy", library = ":go_default_library", deps = [ + "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/util:go_default_library", + 
"//cmd/kubeadm/app/util/config:go_default_library", "//pkg/apis/core:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/util/pointer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", @@ -31,10 +36,12 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", - "//cmd/kubeadm/app/features:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//pkg/api/legacyscheme:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/util/version:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/addons/proxy/manifests.go b/cmd/kubeadm/app/phases/addons/proxy/manifests.go index b1ea464e27e..9962eb44d0c 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/manifests.go +++ b/cmd/kubeadm/app/phases/addons/proxy/manifests.go @@ -17,8 +17,8 @@ limitations under the License. package proxy const ( - // KubeProxyConfigMap is the proxy ConfigMap manifest - KubeProxyConfigMap = ` + // KubeProxyConfigMap18 is the proxy ConfigMap manifest for Kubernetes version 1.8 + KubeProxyConfigMap18 = ` kind: ConfigMap apiVersion: v1 metadata: @@ -48,8 +48,40 @@ data: tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ` - // KubeProxyDaemonSet is the proxy DaemonSet manifest - KubeProxyDaemonSet = ` + // KubeProxyConfigMap19 is the proxy ConfigMap manifest for Kubernetes 1.9 and above + KubeProxyConfigMap19 = ` +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system + labels: + app: kube-proxy +data: + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: {{ .MasterEndpoint }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + config.conf: |- +{{ .ProxyConfig}} +` + // KubeProxyDaemonSet18 is the proxy DaemonSet manifest for Kubernetes version 1.8 + KubeProxyDaemonSet18 = ` apiVersion: apps/v1beta2 kind: DaemonSet metadata: @@ -75,7 +107,6 @@ spec: command: - /usr/local/bin/kube-proxy - --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf - {{ .ExtraParams }} {{ .ClusterCIDR }} securityContext: privileged: true @@ -108,4 +139,63 @@ spec: hostPath: path: /lib/modules ` + + // KubeProxyDaemonSet19 is the proxy DaemonSet manifest for Kubernetes 1.9 and above + KubeProxyDaemonSet19 = ` +apiVersion: apps/v1beta2 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - name: kube-proxy + image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ 
.ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }} + imagePullPolicy: IfNotPresent + command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + hostNetwork: true + serviceAccountName: kube-proxy + tolerations: + - key: {{ .MasterTaintKey }} + effect: NoSchedule + - key: {{ .CloudTaintKey }} + value: "true" + effect: NoSchedule + volumes: + - name: kube-proxy + configMap: + name: kube-proxy + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: lib-modules + hostPath: + path: /lib/modules +` ) diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index 7de01796b2f..a0cade34556 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -28,10 +28,12 @@ import ( clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/features" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" "k8s.io/kubernetes/pkg/api/legacyscheme" + kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -56,27 +58,64 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte return err } - proxyConfigMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap, struct{ MasterEndpoint string }{ - // Fetch this value from the kubeconfig file - MasterEndpoint: masterEndpoint}) + proxyBytes, err := kubeadmutil.MarshalToYamlForCodecsWithShift(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion, + kubeproxyconfigscheme.Codecs) if err != nil { - return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) + return fmt.Errorf("error when marshaling: %v", err) } - - proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ ImageRepository, Arch, Version, ImageOverride, ExtraParams, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ - ImageRepository: cfg.GetControlPlaneImageRepository(), - Arch: runtime.GOARCH, - Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), - ImageOverride: cfg.UnifiedControlPlaneImage, - ExtraParams: getParams(cfg.FeatureGates), - ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet), - MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, - CloudTaintKey: algorithm.TaintExternalCloudProvider, - }) + // Parse the given kubernetes version + k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion) if err != nil { - return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) + return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err) } + var proxyConfigMapBytes, proxyDaemonSetBytes []byte + if k8sVersion.AtLeast(kubeadmconstants.MinimumKubeProxyComponentConfigVersion) { + proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19, + struct { + MasterEndpoint string + ProxyConfig string + }{ + MasterEndpoint: 
masterEndpoint, + ProxyConfig: proxyBytes, + }) + if err != nil { + return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) + } + proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ + ImageRepository: cfg.GetControlPlaneImageRepository(), + Arch: runtime.GOARCH, + Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), + ImageOverride: cfg.UnifiedControlPlaneImage, + MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, + CloudTaintKey: algorithm.TaintExternalCloudProvider, + }) + if err != nil { + return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) + } + } else { + proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap18, + struct { + MasterEndpoint string + }{ + MasterEndpoint: masterEndpoint, + }) + if err != nil { + return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) + } + proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet18, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ + ImageRepository: cfg.GetControlPlaneImageRepository(), + Arch: runtime.GOARCH, + Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), + ImageOverride: cfg.UnifiedControlPlaneImage, + ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet), + MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, + CloudTaintKey: algorithm.TaintExternalCloudProvider, + }) + if err != nil { + return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) + } + } if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil { return err } @@ -144,13 +183,6 @@ func createClusterRoleBindings(client clientset.Interface) error { }) } -func getParams(featureList map[string]bool) string { - if features.Enabled(featureList, features.SupportIPVSProxyMode) { - return "- --proxy-mode=ipvs\n - --feature-gates=SupportIPVSProxyMode=true" - } - return "" -} - func getClusterCIDR(podsubnet string) string { if len(podsubnet) == 0 { return "" diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go index bb192339272..7cd2c3d2abd 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go @@ -17,14 +17,21 @@ limitations under the License. 
package proxy import ( + "strings" "testing" + "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientsetfake "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" api "k8s.io/kubernetes/pkg/apis/core" + kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + "k8s.io/kubernetes/pkg/util/pointer" ) func TestCreateServiceAccount(t *testing.T) { @@ -107,35 +114,180 @@ func TestCompileManifests(t *testing.T) { expected bool }{ { - manifest: KubeProxyConfigMap, - data: struct{ MasterEndpoint string }{ + manifest: KubeProxyConfigMap18, + data: struct { + MasterEndpoint, ProxyConfig string + }{ MasterEndpoint: "foo", }, expected: true, }, { - manifest: KubeProxyDaemonSet, - data: struct{ ImageRepository, Arch, Version, ImageOverride, ExtraParams, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ + manifest: KubeProxyConfigMap19, + data: struct { + MasterEndpoint, ProxyConfig string + }{ + MasterEndpoint: "foo", + ProxyConfig: " bindAddress: 0.0.0.0\n clusterCIDR: 192.168.1.1\n enableProfiling: false", + }, + expected: true, + }, + { + manifest: KubeProxyDaemonSet18, + data: struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ ImageRepository: "foo", Arch: "foo", Version: "foo", ImageOverride: "foo", - ExtraParams: "foo", ClusterCIDR: "foo", MasterTaintKey: "foo", CloudTaintKey: "foo", }, expected: true, }, + { + manifest: KubeProxyDaemonSet19, + data: struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{ + ImageRepository: "foo", + Arch: "foo", + Version: "foo", + ImageOverride: "foo", + MasterTaintKey: "foo", + CloudTaintKey: "foo", + }, + expected: true, + }, } for _, rt := range tests { _, actual := kubeadmutil.ParseTemplate(rt.manifest, rt.data) if (actual == nil) != rt.expected { t.Errorf( - "failed CompileManifests:\n\texpected: %t\n\t actual: %t", + "failed to compile %s manifest:\n\texpected: %t\n\t actual: %t", + rt.manifest, rt.expected, (actual == nil), ) } } } + +func TestEnsureProxyAddon(t *testing.T) { + type SimulatedError int + const ( + NoError SimulatedError = iota + ServiceAccountError + InvalidMasterEndpoint + IPv6SetBindAddress + ) + + var testCases = []struct { + name string + simError SimulatedError + expErrString string + expBindAddr string + expClusterCIDR string + }{ + { + name: "Successful proxy addon", + simError: NoError, + expErrString: "", + expBindAddr: "0.0.0.0", + expClusterCIDR: "5.6.7.8/24", + }, { + name: "Simulated service account error", + simError: ServiceAccountError, + expErrString: "error when creating kube-proxy service account", + expBindAddr: "0.0.0.0", + expClusterCIDR: "5.6.7.8/24", + }, { + name: "IPv6 AdvertiseAddress address", + simError: IPv6SetBindAddress, + expErrString: "", + expBindAddr: "::", + expClusterCIDR: "2001:101::/96", + }, + } + + for _, tc := range testCases { + + // Create a fake client and set up default test configuration + client := clientsetfake.NewSimpleClientset() + + masterConfig := &kubeadmapiext.MasterConfiguration{ + API: kubeadmapiext.API{ + AdvertiseAddress: "1.2.3.4", + BindPort: 1234, + }, + KubeProxy: kubeadmapiext.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "", + 
HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: pointer.Int32Ptr(2), + MaxPerCore: pointer.Int32Ptr(1), + Min: pointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + Networking: kubeadmapiext.Networking{ + PodSubnet: "5.6.7.8/24", + }, + ImageRepository: "someRepo", + KubernetesVersion: "v1.9.0", + UnifiedControlPlaneImage: "someImage", + } + + // Simulate an error if neccessary + switch tc.simError { + case ServiceAccountError: + client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewUnauthorized("") + }) + case InvalidMasterEndpoint: + masterConfig.API.AdvertiseAddress = "1.2.3" + case IPv6SetBindAddress: + masterConfig.API.AdvertiseAddress = "1:2::3:4" + masterConfig.Networking.PodSubnet = "2001:101::/96" + } + + kubeadmapiext.SetDefaults_MasterConfiguration(masterConfig) + intMaster, err := cmdutil.ConfigFileAndDefaultsToInternalConfig("", masterConfig) + if err != nil { + t.Errorf(" test failed to convert v1alpha1 to internal version") + break + } + err = EnsureProxyAddon(intMaster, client) + + // Compare actual to expected errors + actErr := "No error" + if err != nil { + actErr = err.Error() + } + expErr := "No error" + if tc.expErrString != "" { + expErr = tc.expErrString + } + if !strings.Contains(actErr, expErr) { + t.Errorf( + "%s test failed, expected: %s, got: %s", + tc.name, + expErr, + actErr) + } + if intMaster.KubeProxy.Config.BindAddress != tc.expBindAddr { + t.Errorf("%s test failed, expected: %s, got: %s", + tc.name, + tc.expBindAddr, + intMaster.KubeProxy.Config.BindAddress) + } + if intMaster.KubeProxy.Config.ClusterCIDR != tc.expClusterCIDR { + t.Errorf("%s test failed, expected: %s, got: %s", + tc.name, + tc.expClusterCIDR, + intMaster.KubeProxy.Config.ClusterCIDR) + } + } +} diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 02c1243eb37..907707d127e 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -32,6 +32,7 @@ go_library( "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/phases/selfhosting:go_default_library", "//cmd/kubeadm/app/phases/uploadconfig:go_default_library", + "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", @@ -42,8 +43,10 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index 719eced4f30..2ce9314fac9 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -24,73 +24,72 @@ import ( apps "k8s.io/api/apps/v1beta2" "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/preflight" ) // healthCheck is a helper struct for easily performing healthchecks against the cluster and printing the output type healthCheck struct { - description, okMessage, failMessage string - // f is invoked with a k8s client passed to it. Should return an optional warning and/or an error + name string + client clientset.Interface + // f is invoked with a k8s client passed to it. Should return an optional error f func(clientset.Interface) error } +// Check is part of the preflight.Checker interface +func (c *healthCheck) Check() (warnings, errors []error) { + if err := c.f(c.client); err != nil { + return nil, []error{err} + } + return nil, nil +} + +// Name is part of the preflight.Checker interface +func (c *healthCheck) Name() string { + return c.name +} + // CheckClusterHealth makes sure: // - the API /healthz endpoint is healthy -// - all Nodes are Ready +// - all master Nodes are Ready // - (if self-hosted) that there are DaemonSets with at least one Pod for all control plane components // - (if static pod-hosted) that all required Static Pod manifests exist on disk -func CheckClusterHealth(client clientset.Interface) error { +func CheckClusterHealth(client clientset.Interface, ignoreChecksErrors sets.String) error { fmt.Println("[upgrade] Making sure the cluster is healthy:") - healthChecks := []healthCheck{ - { - description: "API Server health", - okMessage: "Healthy", - failMessage: "Unhealthy", - f: apiServerHealthy, + healthChecks := []preflight.Checker{ + &healthCheck{ + name: "APIServerHealth", + client: client, + f: apiServerHealthy, }, - { - description: "Node health", - okMessage: "All Nodes are healthy", - failMessage: "More than one Node unhealthy", - f: nodesHealthy, + &healthCheck{ + name: "MasterNodesReady", + client: client, + f: masterNodesReady, }, // TODO: Add a check for ComponentStatuses here? 
} // Run slightly different health checks depending on control plane hosting type if IsControlPlaneSelfHosted(client) { - healthChecks = append(healthChecks, healthCheck{ - description: "Control plane DaemonSet health", - okMessage: "All control plane DaemonSets are healthy", - failMessage: "More than one control plane DaemonSet unhealthy", - f: controlPlaneHealth, + healthChecks = append(healthChecks, &healthCheck{ + name: "ControlPlaneHealth", + client: client, + f: controlPlaneHealth, }) } else { - healthChecks = append(healthChecks, healthCheck{ - description: "Static Pod manifests exists on disk", - okMessage: "All manifests exist on disk", - failMessage: "Some manifests don't exist on disk", - f: staticPodManifestHealth, + healthChecks = append(healthChecks, &healthCheck{ + name: "StaticPodManifest", + client: client, + f: staticPodManifestHealth, }) } - return runHealthChecks(client, healthChecks) -} - -// runHealthChecks runs a set of health checks against the cluster -func runHealthChecks(client clientset.Interface, healthChecks []healthCheck) error { - for _, check := range healthChecks { - - err := check.f(client) - if err != nil { - fmt.Printf("[upgrade/health] Checking %s: %s\n", check.description, check.failMessage) - return fmt.Errorf("The cluster is not in an upgradeable state due to: %v", err) - } - fmt.Printf("[upgrade/health] Checking %s: %s\n", check.description, check.okMessage) - } - return nil + return preflight.RunChecks(healthChecks, os.Stderr, ignoreChecksErrors) } // apiServerHealthy checks whether the API server's /healthz endpoint is healthy @@ -108,16 +107,25 @@ func apiServerHealthy(client clientset.Interface) error { return nil } -// nodesHealthy checks whether all Nodes in the cluster are in the Running state -func nodesHealthy(client clientset.Interface) error { - nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{}) +// masterNodesReady checks whether all master Nodes in the cluster are in the Running state +func masterNodesReady(client clientset.Interface) error { + selector := labels.SelectorFromSet(labels.Set(map[string]string{ + constants.LabelNodeRoleMaster: "", + })) + masters, err := client.CoreV1().Nodes().List(metav1.ListOptions{ + LabelSelector: selector.String(), + }) if err != nil { - return fmt.Errorf("couldn't list all nodes in cluster: %v", err) + return fmt.Errorf("couldn't list masters in cluster: %v", err) } - notReadyNodes := getNotReadyNodes(nodes.Items) - if len(notReadyNodes) != 0 { - return fmt.Errorf("there are NotReady Nodes in the cluster: %v", notReadyNodes) + if len(masters.Items) == 0 { + return fmt.Errorf("failed to find any nodes with master role") + } + + notReadyMasters := getNotReadyNodes(masters.Items) + if len(notReadyMasters) != 0 { + return fmt.Errorf("there are NotReady masters in the cluster: %v", notReadyMasters) } return nil } diff --git a/cmd/kubeadm/app/util/config/masterconfig.go b/cmd/kubeadm/app/util/config/masterconfig.go index e4414649c28..e4bec1d6dcc 100644 --- a/cmd/kubeadm/app/util/config/masterconfig.go +++ b/cmd/kubeadm/app/util/config/masterconfig.go @@ -44,7 +44,12 @@ func SetInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { return err } cfg.API.AdvertiseAddress = ip.String() - + ip = net.ParseIP(cfg.API.AdvertiseAddress) + if ip.To4() != nil { + cfg.KubeProxy.Config.BindAddress = kubeadmapiext.DefaultProxyBindAddressv4 + } else { + cfg.KubeProxy.Config.BindAddress = kubeadmapiext.DefaultProxyBindAddressv6 + } // Resolve possible version labels and validate version string 
diff --git a/cmd/kubeadm/app/util/config/masterconfig.go b/cmd/kubeadm/app/util/config/masterconfig.go
index e4414649c28..e4bec1d6dcc 100644
--- a/cmd/kubeadm/app/util/config/masterconfig.go
+++ b/cmd/kubeadm/app/util/config/masterconfig.go
@@ -44,7 +44,12 @@ func SetInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error {
         return err
     }
     cfg.API.AdvertiseAddress = ip.String()
-
+    ip = net.ParseIP(cfg.API.AdvertiseAddress)
+    if ip.To4() != nil {
+        cfg.KubeProxy.Config.BindAddress = kubeadmapiext.DefaultProxyBindAddressv4
+    } else {
+        cfg.KubeProxy.Config.BindAddress = kubeadmapiext.DefaultProxyBindAddressv6
+    }
     // Resolve possible version labels and validate version string
     err = NormalizeKubernetesVersion(cfg)
     if err != nil {
diff --git a/cmd/kubeadm/app/util/marshal.go b/cmd/kubeadm/app/util/marshal.go
index 67281d04732..907a0f17794 100644
--- a/cmd/kubeadm/app/util/marshal.go
+++ b/cmd/kubeadm/app/util/marshal.go
@@ -18,6 +18,7 @@ package util
 
 import (
     "fmt"
+    "strings"
 
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
@@ -41,3 +42,18 @@ func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs s
     encoder := codecs.EncoderForVersion(info.Serializer, gv)
     return runtime.Encode(encoder, obj)
 }
+
+// MarshalToYamlForCodecsWithShift adds spaces in front of each line so the indents line up
+// correctly in the manifest
+func MarshalToYamlForCodecsWithShift(obj runtime.Object, gv schema.GroupVersion, codecs serializer.CodecFactory) (string, error) {
+    serial, err := MarshalToYamlForCodecs(obj, gv, codecs)
+    if err != nil {
+        return "", err
+    }
+    lines := strings.Split(string(serial), "\n")
+    var newSerial string
+    for _, line := range lines {
+        newSerial = newSerial + " " + line + "\n"
+    }
+    return newSerial, err
+}
diff --git a/pkg/controller/node/ipam/BUILD b/pkg/controller/node/ipam/BUILD
index 1f0d52855b3..e72f938e941 100644
--- a/pkg/controller/node/ipam/BUILD
+++ b/pkg/controller/node/ipam/BUILD
@@ -42,6 +42,7 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/controller/node/ipam",
     deps = [
+        "//pkg/api/v1/node:go_default_library",
         "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/controller:go_default_library",
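For context on MarshalToYamlForCodecsWithShift: the shifted output is meant to sit under an indented key in a generated manifest (presumably the kube-proxy ConfigMap that the rest of this series writes out). Below is a stand-alone sketch of the effect; the shift helper, the sample YAML, and the config.conf key are stand-ins rather than the real call site.

```go
package main

import (
	"fmt"
	"strings"
)

// shift mirrors what MarshalToYamlForCodecsWithShift does to the serialized
// object: prefix every line so the block lines up under an indented key.
func shift(serial string) string {
	var out string
	for _, line := range strings.Split(serial, "\n") {
		out = out + " " + line + "\n"
	}
	return out
}

func main() {
	// Stand-in for what MarshalToYamlForCodecs would return for a
	// KubeProxyConfiguration object.
	sample := "kind: KubeProxyConfiguration\nbindAddress: 0.0.0.0\nclusterCIDR: 10.244.0.0/16"
	// Prints the sample nested one level deeper, e.g. under a ConfigMap key.
	fmt.Printf("config.conf: |-\n%s", shift(sample))
}
```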
diff --git a/pkg/controller/node/ipam/cloud_cidr_allocator.go b/pkg/controller/node/ipam/cloud_cidr_allocator.go
index 1654c312532..87fba8bb0a8 100644
--- a/pkg/controller/node/ipam/cloud_cidr_allocator.go
+++ b/pkg/controller/node/ipam/cloud_cidr_allocator.go
@@ -36,11 +36,12 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    v1node "k8s.io/kubernetes/pkg/api/v1/node"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/util"
-    nodeutil "k8s.io/kubernetes/pkg/util/node"
+    utilnode "k8s.io/kubernetes/pkg/util/node"
 )
 
 // cloudCIDRAllocator allocates node CIDRs according to IP address aliases
@@ -105,6 +106,12 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
             if newNode.Spec.PodCIDR == "" {
                 return ca.AllocateOrOccupyCIDR(newNode)
             }
+            // Even if PodCIDR is assigned, but NetworkUnavailable condition is
+            // set to true, we need to process the node to set the condition.
+            _, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
+            if cond == nil || cond.Status != v1.ConditionFalse {
+                return ca.AllocateOrOccupyCIDR(newNode)
+            }
             return nil
         }),
         DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR),
@@ -201,7 +208,6 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
     podCIDR := cidr.String()
 
     for rep := 0; rep < cidrUpdateRetries; rep++ {
-        // TODO: change it to using PATCH instead of full Node updates.
         node, err = ca.nodeLister.Get(nodeName)
         if err != nil {
             glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err)
@@ -210,7 +216,8 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
         if node.Spec.PodCIDR != "" {
             if node.Spec.PodCIDR == podCIDR {
                 glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
-                return nil
+                // We don't return to set the NetworkUnavailable condition if needed.
+                break
             }
             glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v",
                 node.Name, node.Spec.PodCIDR, podCIDR)
@@ -220,8 +227,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
             //
             // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
         }
-        node.Spec.PodCIDR = podCIDR
-        if _, err = ca.client.CoreV1().Nodes().Update(node); err == nil {
+        if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
             glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
             break
         }
@@ -233,7 +239,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
         return err
     }
 
-    err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
+    err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
         Type:   v1.NodeNetworkUnavailable,
         Status: v1.ConditionFalse,
         Reason: "RouteCreated",
diff --git a/pkg/controller/node/ipam/range_allocator.go b/pkg/controller/node/ipam/range_allocator.go
index 13d1f77d7eb..6761e73fde9 100644
--- a/pkg/controller/node/ipam/range_allocator.go
+++ b/pkg/controller/node/ipam/range_allocator.go
@@ -25,6 +25,7 @@ import (
 
     "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/types"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/sets"
     informers "k8s.io/client-go/informers/core/v1"
@@ -37,6 +38,7 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
     "k8s.io/kubernetes/pkg/controller/node/util"
+    nodeutil "k8s.io/kubernetes/pkg/util/node"
 )
 
 type rangeAllocator struct {
@@ -282,7 +284,6 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
     podCIDR := data.cidr.String()
 
     for rep := 0; rep < cidrUpdateRetries; rep++ {
-        // TODO: change it to using PATCH instead of full Node updates.
         node, err = r.nodeLister.Get(data.nodeName)
         if err != nil {
             glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
@@ -299,8 +300,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
             }
             return nil
         }
-        node.Spec.PodCIDR = podCIDR
-        if _, err = r.client.CoreV1().Nodes().Update(node); err == nil {
+        if err = nodeutil.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
             glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
             break
         }
diff --git a/pkg/controller/node/ipam/range_allocator_test.go b/pkg/controller/node/ipam/range_allocator_test.go
index c20ccd6a527..eadb608831f 100644
--- a/pkg/controller/node/ipam/range_allocator_test.go
+++ b/pkg/controller/node/ipam/range_allocator_test.go
@@ -333,6 +333,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
             }(),
             serviceCIDR:                      nil,
             subNetMaskSize:                   30,
+            allocatedCIDRs:                   []string{"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
             expectedAllocatedCIDRFirstRound:  "127.123.234.0/30",
             cidrsToRelease:                   []string{"127.123.234.0/30"},
             expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
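A note on the cloud allocator change above: a node is now reprocessed not only when it lacks a PodCIDR, but also when its NetworkUnavailable condition has not yet been set to False. The same test in isolation; needsProcessing is a hypothetical helper written for illustration, not code from this patch.

```go
package ipam

import (
	"k8s.io/api/core/v1"
	v1node "k8s.io/kubernetes/pkg/api/v1/node"
)

// needsProcessing mirrors the UpdateFunc logic above: reprocess the node
// unless its CIDR is set and NetworkUnavailable has already been set to False.
func needsProcessing(node *v1.Node) bool {
	if node.Spec.PodCIDR == "" {
		return true
	}
	_, cond := v1node.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
	return cond == nil || cond.Status != v1.ConditionFalse
}
```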
diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go
index a985c9f0392..a4c91bdbbf8 100644
--- a/pkg/util/node/node.go
+++ b/pkg/util/node/node.go
@@ -150,6 +150,21 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N
     return err
 }
 
+// PatchNodeCIDR patches the specified node's CIDR to the given value.
+func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error {
+    raw, err := json.Marshal(cidr)
+    if err != nil {
+        return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
+    }
+
+    patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s}}`, raw))
+
+    if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil {
+        return fmt.Errorf("failed to patch node CIDR: %v", err)
+    }
+    return nil
+}
+
 // PatchNodeStatus patches node status.
 func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
     oldData, err := json.Marshal(oldNode)
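Both allocators now go through PatchNodeCIDR instead of a full Nodes().Update(), so only spec.podCIDR is written. A usage sketch follows; the client wiring, node name, and CIDR are made up, and only the PatchNodeCIDR call itself comes from this diff.

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	utilnode "k8s.io/kubernetes/pkg/util/node"
)

// assignCIDR patches a node's PodCIDR. For "10.244.1.0/24" the strategic merge
// patch body sent to the API server is: {"spec":{"podCIDR":"10.244.1.0/24"}}
func assignCIDR(client clientset.Interface) error {
	if err := utilnode.PatchNodeCIDR(client, types.NodeName("node-1"), "10.244.1.0/24"); err != nil {
		return fmt.Errorf("failed to set PodCIDR: %v", err)
	}
	return nil
}
```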