mirror of https://github.com/k3s-io/kubernetes.git

Update unit tests

This commit is contained in:
parent 495ac9883d
commit 17b3b28190
@@ -25,10 +25,11 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
  "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
  "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
  "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
  kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
  kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
  "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
  kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
  utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)

@@ -40,11 +41,10 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
  obj.KubernetesVersion = "v10"
  obj.API.BindPort = 20
  obj.API.AdvertiseAddress = "foo"
  obj.Networking.ServiceSubnet = "foo"
  obj.Networking.DNSDomain = "foo"
  obj.Networking.ServiceSubnet = "10.96.0.0/12"
  obj.Networking.DNSDomain = "cluster.local"
  obj.CertificatesDir = "foo"
  obj.APIServerCertSANs = []string{"foo"}

  obj.BootstrapTokens = []kubeadm.BootstrapToken{
    {
      Token: &kubeadm.BootstrapTokenString{
@@ -68,9 +68,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
  MountPath: "foo",
  Writable: false,
  }}
  // Note: We don't set values here for obj.Etcd.External, as these are mutually exclusive.
  // And to make sure the fuzzer doesn't set a random value for obj.Etcd.External, we let
  // kubeadmapi.Etcd implement fuzz.Interface (we handle that ourselves)
  obj.Etcd.Local = &kubeadm.LocalEtcd{
    Image: "foo",
    DataDir: "foo",
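The comment above refers to gofuzz's custom-fuzzing hook: a type that implements fuzz.Interface takes full control over how it is randomized. A minimal, self-contained sketch of that pattern, assuming hypothetical LocalEtcd/ExternalEtcd stand-in types rather than the actual kubeadm ones:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// LocalEtcd and ExternalEtcd are hypothetical stand-ins for two settings
// that must never be populated at the same time.
type LocalEtcd struct{ DataDir string }
type ExternalEtcd struct{ Endpoints []string }

type Etcd struct {
	Local    *LocalEtcd
	External *ExternalEtcd
}

// Fuzz implements fuzz.Interface, so the fuzzer calls this method instead
// of filling the struct field by field; exactly one branch is ever set.
func (e *Etcd) Fuzz(c fuzz.Continue) {
	e.Local, e.External = nil, nil
	if c.RandBool() {
		e.Local = &LocalEtcd{}
		c.Fuzz(e.Local)
	} else {
		e.External = &ExternalEtcd{}
		c.Fuzz(e.External)
	}
}

func main() {
	var e Etcd
	fuzz.New().Fuzz(&e)
	fmt.Printf("local set: %v, external set: %v\n", e.Local != nil, e.External != nil)
}
```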
@@ -83,65 +80,23 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
  Name: "foo",
  Taints: []v1.Taint{},
  }
  extkubeletconfig := &kubeletconfigv1beta1.KubeletConfiguration{
    StaticPodPath: "foo",
    ClusterDNS: []string{"foo"},
    ClusterDomain: "foo",
    Authorization: kubeletconfigv1beta1.KubeletAuthorization{
      Mode: "Webhook",
    },
    Authentication: kubeletconfigv1beta1.KubeletAuthentication{
      X509: kubeletconfigv1beta1.KubeletX509Authentication{
        ClientCAFile: "/etc/kubernetes/pki/ca.crt",
      },
      Anonymous: kubeletconfigv1beta1.KubeletAnonymousAuthentication{
        Enabled: utilpointer.BoolPtr(false),
      },
    },
    RotateCertificates: true,
  }
  obj.ComponentConfigs.Kubelet = &kubeletconfig.KubeletConfiguration{}
  kubeletconfigv1beta1.SetDefaults_KubeletConfiguration(extkubeletconfig)
  scheme, _, _ := kubeletconfigscheme.NewSchemeAndCodecs()
  scheme.Convert(extkubeletconfig, obj.ComponentConfigs.Kubelet, nil)
  obj.ComponentConfigs.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{
    FeatureGates: map[string]bool{"foo": true},
    BindAddress: "foo",
    HealthzBindAddress: "foo:10256",
    MetricsBindAddress: "foo:",
    EnableProfiling: bool(true),
    ClusterCIDR: "foo",
    HostnameOverride: "foo",
    ClientConnection: kubeproxyconfig.ClientConnectionConfiguration{
      KubeConfigFile: "foo",
      AcceptContentTypes: "foo",
      ContentType: "foo",
      QPS: float32(5),
      Burst: 10,
    },
    IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
      SyncPeriod: metav1.Duration{Duration: 1},
    },
    IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
      MasqueradeBit: utilpointer.Int32Ptr(0),
      SyncPeriod: metav1.Duration{Duration: 1},
    },
    OOMScoreAdj: utilpointer.Int32Ptr(0),
    ResourceContainer: "foo",
    UDPIdleTimeout: metav1.Duration{Duration: 1},
    Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
      MaxPerCore: utilpointer.Int32Ptr(2),
      Min: utilpointer.Int32Ptr(1),
      TCPEstablishedTimeout: &metav1.Duration{Duration: 5},
      TCPCloseWaitTimeout: &metav1.Duration{Duration: 5},
    },
    ConfigSyncPeriod: metav1.Duration{Duration: 1},
  }
  obj.AuditPolicyConfiguration = kubeadm.AuditPolicyConfiguration{
    Path: "foo",
    LogDir: "/foo",
    LogMaxAge: utilpointer.Int32Ptr(0),
  }
  // Set the Kubelet ComponentConfig to an empty, defaulted struct
  extkubeletconfig := &kubeletconfigv1beta1.KubeletConfiguration{}
  obj.ComponentConfigs.Kubelet = &kubeletconfig.KubeletConfiguration{}
  componentconfigs.Scheme.Default(extkubeletconfig)
  componentconfigs.Scheme.Convert(extkubeletconfig, obj.ComponentConfigs.Kubelet, nil)
  componentconfigs.DefaultKubeletConfiguration(obj)
  // Set the KubeProxy ComponentConfig to an empty, defaulted struct
  extkubeproxyconfig := &kubeproxyconfigv1alpha1.KubeProxyConfiguration{}
  obj.ComponentConfigs.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{}
  componentconfigs.Scheme.Default(extkubeproxyconfig)
  componentconfigs.Scheme.Convert(extkubeproxyconfig, obj.ComponentConfigs.KubeProxy, nil)
  componentconfigs.DefaultKubeProxyConfiguration(obj)
  },
  func(obj *kubeadm.NodeConfiguration, c fuzz.Continue) {
    c.FuzzNoCustom(obj)

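The new fuzzer code above no longer hand-crafts kubelet and kube-proxy values; it starts from an empty external (versioned) object, runs the registered defaulting, and then converts the result into the internal type. A rough sketch of that default-then-convert flow with plain stand-in types (componentconfigs.Scheme performs the Default/Convert steps via apimachinery in the real code; the types and default values here are illustrative only):

```go
package main

import "fmt"

// externalKubeletConfig is a stand-in for a versioned config type whose
// optional fields are pointers, so "unset" is distinguishable from zero.
type externalKubeletConfig struct {
	HealthzPort   *int32
	ClusterDomain string
}

// internalKubeletConfig is a stand-in for the internal (hub) type.
type internalKubeletConfig struct {
	HealthzPort   int32
	ClusterDomain string
}

// defaultExternal mimics Scheme.Default: fill only what the user left unset.
func defaultExternal(cfg *externalKubeletConfig) {
	if cfg.HealthzPort == nil {
		p := int32(10248)
		cfg.HealthzPort = &p
	}
	if cfg.ClusterDomain == "" {
		cfg.ClusterDomain = "cluster.local"
	}
}

// convertToInternal mimics Scheme.Convert: external type to internal type.
func convertToInternal(in *externalKubeletConfig, out *internalKubeletConfig) {
	out.HealthzPort = *in.HealthzPort
	out.ClusterDomain = in.ClusterDomain
}

func main() {
	ext := &externalKubeletConfig{} // empty, like the fuzzer's starting point
	defaultExternal(ext)
	internal := &internalKubeletConfig{}
	convertToInternal(ext, internal)
	fmt.Printf("%+v\n", *internal) // {HealthzPort:10248 ClusterDomain:cluster.local}
}
```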
@@ -19,7 +19,6 @@ package validation

import (
  "io/ioutil"
  "os"
  "strings"
  "testing"
  "time"

@@ -28,7 +27,6 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/util/validation/field"
  "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
  "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
  "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
  utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)

@@ -678,308 +676,6 @@ func TestValidateIgnorePreflightErrors(t *testing.T) {
  }
}

func TestValidateKubeletConfiguration(t *testing.T) {
  successCase := &kubeletconfig.KubeletConfiguration{
    CgroupsPerQOS: true,
    EnforceNodeAllocatable: []string{"pods", "system-reserved", "kube-reserved"},
    SystemCgroups: "",
    CgroupRoot: "",
    EventBurst: 10,
    EventRecordQPS: 5,
    HealthzPort: 10248,
    ImageGCHighThresholdPercent: 85,
    ImageGCLowThresholdPercent: 80,
    IPTablesDropBit: 15,
    IPTablesMasqueradeBit: 14,
    KubeAPIBurst: 10,
    KubeAPIQPS: 5,
    MaxOpenFiles: 1000000,
    MaxPods: 110,
    OOMScoreAdj: -999,
    PodsPerCore: 100,
    Port: 65535,
    ReadOnlyPort: 0,
    RegistryBurst: 10,
    RegistryPullQPS: 5,
    HairpinMode: "promiscuous-bridge",
  }
  if allErrors := ValidateKubeletConfiguration(successCase, nil); len(allErrors) != 0 {
    t.Errorf("failed ValidateKubeletConfiguration: expect no errors but got %v", allErrors)
  }

  errorCase := &kubeletconfig.KubeletConfiguration{
    CgroupsPerQOS: false,
    EnforceNodeAllocatable: []string{"pods", "system-reserved", "kube-reserved", "illegal-key"},
    SystemCgroups: "/",
    CgroupRoot: "",
    EventBurst: -10,
    EventRecordQPS: -10,
    HealthzPort: -10,
    ImageGCHighThresholdPercent: 101,
    ImageGCLowThresholdPercent: 101,
    IPTablesDropBit: -10,
    IPTablesMasqueradeBit: -10,
    KubeAPIBurst: -10,
    KubeAPIQPS: -10,
    MaxOpenFiles: -10,
    MaxPods: -10,
    OOMScoreAdj: -1001,
    PodsPerCore: -10,
    Port: 0,
    ReadOnlyPort: -10,
    RegistryBurst: -10,
    RegistryPullQPS: -10,
  }
  if allErrors := ValidateKubeletConfiguration(errorCase, nil); len(allErrors) == 0 {
    t.Errorf("failed ValidateKubeletConfiguration: expect errors but got no error")
  }
}

func TestValidateKubeProxyConfiguration(t *testing.T) {
  successCases := []kubeadm.MasterConfiguration{
    {
      ComponentConfigs: kubeadm.ComponentConfigs{
        KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
          BindAddress: "192.168.59.103",
          HealthzBindAddress: "0.0.0.0:10256",
          MetricsBindAddress: "127.0.0.1:10249",
          ClusterCIDR: "192.168.59.0/24",
          UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
          ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
          IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
            MasqueradeAll: true,
            SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
          },
          IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
            SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
            MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
          },
          Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
            Max: utilpointer.Int32Ptr(2),
            MaxPerCore: utilpointer.Int32Ptr(1),
            Min: utilpointer.Int32Ptr(1),
            TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
            TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
          },
        },
      },
    },
  }

  for _, successCase := range successCases {
    if errs := ValidateProxy(successCase.ComponentConfigs.KubeProxy, nil); len(errs) != 0 {
      t.Errorf("failed ValidateProxy: expect no errors but got %v", errs)
    }
  }

  errorCases := []struct {
    masterConfig kubeadm.MasterConfiguration
    msg string
  }{
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            // only BindAddress is invalid
            BindAddress: "10.10.12.11:2000",
            HealthzBindAddress: "0.0.0.0:10256",
            MetricsBindAddress: "127.0.0.1:10249",
            ClusterCIDR: "192.168.59.0/24",
            UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
            ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "not a valid textual representation of an IP address",
    },
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            BindAddress: "10.10.12.11",
            // only HealthzBindAddress is invalid
            HealthzBindAddress: "0.0.0.0",
            MetricsBindAddress: "127.0.0.1:10249",
            ClusterCIDR: "192.168.59.0/24",
            UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
            ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "must be IP:port",
    },
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            BindAddress: "10.10.12.11",
            HealthzBindAddress: "0.0.0.0:12345",
            // only MetricsBindAddress is invalid
            MetricsBindAddress: "127.0.0.1",
            ClusterCIDR: "192.168.59.0/24",
            UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
            ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "must be IP:port",
    },
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            BindAddress: "10.10.12.11",
            HealthzBindAddress: "0.0.0.0:12345",
            MetricsBindAddress: "127.0.0.1:10249",
            // only ClusterCIDR is invalid
            ClusterCIDR: "192.168.59.0",
            UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
            ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "must be a valid CIDR block (e.g. 10.100.0.0/16)",
    },
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            BindAddress: "10.10.12.11",
            HealthzBindAddress: "0.0.0.0:12345",
            MetricsBindAddress: "127.0.0.1:10249",
            ClusterCIDR: "192.168.59.0/24",
            // only UDPIdleTimeout is invalid
            UDPIdleTimeout: metav1.Duration{Duration: -1 * time.Second},
            ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "must be greater than 0",
    },
    {
      masterConfig: kubeadm.MasterConfiguration{
        ComponentConfigs: kubeadm.ComponentConfigs{
          KubeProxy: &kubeproxyconfig.KubeProxyConfiguration{
            BindAddress: "10.10.12.11",
            HealthzBindAddress: "0.0.0.0:12345",
            MetricsBindAddress: "127.0.0.1:10249",
            ClusterCIDR: "192.168.59.0/24",
            UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
            // only ConfigSyncPeriod is invalid
            ConfigSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
              MasqueradeAll: true,
              SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
              SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
              MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
              Max: utilpointer.Int32Ptr(2),
              MaxPerCore: utilpointer.Int32Ptr(1),
              Min: utilpointer.Int32Ptr(1),
              TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
              TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
            },
          },
        },
      },
      msg: "must be greater than 0",
    },
  }

  for i, errorCase := range errorCases {
    if errs := ValidateProxy(errorCase.masterConfig.ComponentConfigs.KubeProxy, nil); len(errs) == 0 {
      t.Errorf("%d failed ValidateProxy: expected error for %s, but got no error", i, errorCase.msg)
    } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
      t.Errorf("%d failed ValidateProxy: unexpected error: %v, expected: %s", i, errs[0], errorCase.msg)
    }
  }
}

func TestValidateArgSelection(t *testing.T) {
  var tests = []struct {
    name string

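The removed ValidateProxy cases above all follow the same table-driven shape: each entry flips exactly one field to an invalid value and records the substring the validation error must contain. A generic, self-contained version of that assertion pattern (validatePort is a made-up validator used only for illustration, not a kubeadm function):

```go
package main

import (
	"fmt"
	"strings"
)

// validatePort is a hypothetical validator that returns a list of errors.
func validatePort(port int) []error {
	var errs []error
	if port < 1 || port > 65535 {
		errs = append(errs, fmt.Errorf("port %d must be between 1 and 65535", port))
	}
	return errs
}

func main() {
	errorCases := []struct {
		port int
		msg  string // substring the first error must contain
	}{
		{port: 0, msg: "must be between"},
		{port: 70000, msg: "must be between"},
	}
	for i, tc := range errorCases {
		errs := validatePort(tc.port)
		switch {
		case len(errs) == 0:
			fmt.Printf("%d: expected an error containing %q, got none\n", i, tc.msg)
		case !strings.Contains(errs[0].Error(), tc.msg):
			fmt.Printf("%d: unexpected error %v, expected %q\n", i, errs[0], tc.msg)
		default:
			fmt.Printf("%d: ok\n", i)
		}
	}
}
```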
@@ -58,8 +58,6 @@ func TestPrintConfiguration(t *testing.T) {
  image: ""
  imageRepository: ""
  kind: MasterConfiguration
  kubeProxy: {}
  kubeletConfiguration: {}
  kubernetesVersion: v1.7.1
  networking:
  dnsDomain: ""
@@ -100,8 +98,6 @@ func TestPrintConfiguration(t *testing.T) {
  keyFile: ""
  imageRepository: ""
  kind: MasterConfiguration
  kubeProxy: {}
  kubeletConfiguration: {}
  kubernetesVersion: v1.7.1
  networking:
  dnsDomain: ""

@@ -28,9 +28,9 @@ import (
  core "k8s.io/client-go/testing"
  kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3"
  kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
  cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
  configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
  api "k8s.io/kubernetes/pkg/apis/core"
  kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
  "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
  "k8s.io/kubernetes/pkg/util/pointer"
)

@@ -173,32 +173,17 @@ func TestEnsureProxyAddon(t *testing.T) {

  // Create a fake client and set up default test configuration
  client := clientsetfake.NewSimpleClientset()

  // TODO: Consider using a YAML file instead for this that makes it possible to specify YAML documents for the ComponentConfigs
  masterConfig := &kubeadmapiv1alpha3.MasterConfiguration{
    API: kubeadmapiv1alpha3.API{
      AdvertiseAddress: "1.2.3.4",
      BindPort: 1234,
    },
    KubeProxy: kubeadmapiv1alpha3.KubeProxy{
      Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{
        BindAddress: "",
        HealthzBindAddress: "0.0.0.0:10256",
        MetricsBindAddress: "127.0.0.1:10249",
        Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{
          Max: pointer.Int32Ptr(2),
          MaxPerCore: pointer.Int32Ptr(1),
          Min: pointer.Int32Ptr(1),
          TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
          TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
        },
      },
    },
    Networking: kubeadmapiv1alpha3.Networking{
      PodSubnet: "5.6.7.8/24",
    },
    ImageRepository: "someRepo",
    KubernetesVersion: "v1.10.0",
    UnifiedControlPlaneImage: "someImage",
    ImageRepository: "someRepo",
    KubernetesVersion: "v1.10.0",
  }

  // Simulate an error if necessary
@@ -214,10 +199,26 @@ func TestEnsureProxyAddon(t *testing.T) {
  masterConfig.Networking.PodSubnet = "2001:101::/96"
  }

  kubeadmapiv1alpha3.SetDefaults_MasterConfiguration(masterConfig)
  intMaster, err := cmdutil.ConfigFileAndDefaultsToInternalConfig("", masterConfig)
  intMaster, err := configutil.ConfigFileAndDefaultsToInternalConfig("", masterConfig)
  if err != nil {
  t.Errorf(" test failed to convert v1alpha1 to internal version")
  t.Errorf("test failed to convert external to internal version")
  break
  }
  intMaster.ComponentConfigs.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{
    BindAddress: "",
    HealthzBindAddress: "0.0.0.0:10256",
    MetricsBindAddress: "127.0.0.1:10249",
    Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
      Max: pointer.Int32Ptr(2),
      MaxPerCore: pointer.Int32Ptr(1),
      Min: pointer.Int32Ptr(1),
      TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
      TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
    },
  }
  // Run dynamic defaulting again as we changed the internal cfg
  if err := configutil.SetInitDynamicDefaults(intMaster); err != nil {
  t.Errorf("test failed to set dynamic defaults: %v", err)
  break
  }
  err = EnsureProxyAddon(intMaster, client)
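The hunk above swaps a hand-built KubeProxy struct in after the initial conversion and therefore has to run SetInitDynamicDefaults a second time. The underlying idea is that dynamic defaulting only fills fields that are still empty, so re-running it after mutating the config is safe. A small sketch of such an idempotent defaulting helper (the Config type and the hostname lookup are illustrative only, not the kubeadm implementation):

```go
package main

import (
	"fmt"
	"os"
)

// Config is a hypothetical internal configuration.
type Config struct {
	NodeName      string
	AdvertiseAddr string
}

// setDynamicDefaults fills runtime-derived values, but only where the
// field is still unset, so calling it repeatedly never overwrites edits.
func setDynamicDefaults(cfg *Config) error {
	if cfg.NodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return err
		}
		cfg.NodeName = hostname
	}
	if cfg.AdvertiseAddr == "" {
		cfg.AdvertiseAddr = "0.0.0.0"
	}
	return nil
}

func main() {
	cfg := &Config{}
	_ = setDynamicDefaults(cfg) // initial defaulting
	cfg.AdvertiseAddr = "10.0.0.5"
	_ = setDynamicDefaults(cfg) // safe to run again after the change
	fmt.Printf("%+v\n", *cfg)
}
```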
@@ -61,7 +61,7 @@ func TestUploadConfiguration(t *testing.T) {
  },
  }
  for _, tt := range tests {
  t.Run(tt.name, func(t *testing.T) {
  t.Run(tt.name, func(t2 *testing.T) {
  cfg := &kubeadmapi.MasterConfiguration{
    KubernetesVersion: "v1.10.3",
    BootstrapTokens: []kubeadmapi.BootstrapToken{
@@ -85,7 +85,7 @@ func TestUploadConfiguration(t *testing.T) {
  }
  // For idempotent test, we check the result of the second call.
  if err := UploadConfiguration(cfg, client); !tt.updateExisting && (err != nil) != tt.errExpected {
  t.Errorf("UploadConfiguration() error = %v, wantErr %v", err, tt.errExpected)
  t2.Fatalf("UploadConfiguration() error = %v, wantErr %v", err, tt.errExpected)
  }
  if tt.updateExisting {
  if tt.errOnUpdate != nil {
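"For idempotent test, we check the result of the second call" points at the create-or-update behaviour the test relies on: calling the upload twice with the same data must succeed and leave a single entry behind. A minimal sketch of that kind of upsert and its double-call check, using a plain map as a stand-in for the ConfigMap store (all names here are illustrative):

```go
package main

import "fmt"

// store is a stand-in for the ConfigMap API: key -> serialized config.
type store map[string]string

// upload creates the entry or overwrites it if it already exists, which
// is what makes a second call a no-op rather than an error.
func upload(s store, key, data string) error {
	s[key] = data
	return nil
}

func main() {
	s := store{}
	if err := upload(s, "kubeadm-config", "cfg-v1"); err != nil {
		fmt.Println("first call failed:", err)
	}
	// The idempotency check: the second call must also succeed.
	if err := upload(s, "kubeadm-config", "cfg-v1"); err != nil {
		fmt.Println("second call failed:", err)
	}
	fmt.Println("entries:", len(s)) // still 1
}
```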
@@ -94,49 +94,36 @@ func TestUploadConfiguration(t *testing.T) {
  })
  }
  if err := UploadConfiguration(cfg, client); (err != nil) != tt.errExpected {
  t.Errorf("UploadConfiguration() error = %v", err)
  t2.Fatalf("UploadConfiguration() error = %v", err)
  }
  }
  if tt.verifyResult {
  masterCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.MasterConfigurationConfigMap, metav1.GetOptions{})
  if err != nil {
  t.Errorf("Fail to query ConfigMap error = %v", err)
  t2.Fatalf("Fail to query ConfigMap error = %v", err)
  }
  configData := masterCfg.Data[kubeadmconstants.MasterConfigurationConfigMapKey]
  if configData == "" {
  t.Errorf("Fail to find ConfigMap key")
  t2.Fatalf("Fail to find ConfigMap key")
  }

  decodedExtCfg := &kubeadmapiv1alpha3.MasterConfiguration{}
  decodedCfg := &kubeadmapi.MasterConfiguration{}

  if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(configData), decodedExtCfg); err != nil {
  t.Errorf("unable to decode config from bytes: %v", err)
  if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(configData), decodedCfg); err != nil {
  t2.Fatalf("unable to decode config from bytes: %v", err)
  }
  // Default and convert to the internal version
  kubeadmscheme.Scheme.Default(decodedExtCfg)
  kubeadmscheme.Scheme.Convert(decodedExtCfg, decodedCfg, nil)

  if decodedCfg.KubernetesVersion != cfg.KubernetesVersion {
  t.Errorf("Decoded value doesn't match, decoded = %#v, expected = %#v", decodedCfg.KubernetesVersion, cfg.KubernetesVersion)
  t2.Errorf("Decoded value doesn't match, decoded = %#v, expected = %#v", decodedCfg.KubernetesVersion, cfg.KubernetesVersion)
  }

  // If the decoded cfg has a BootstrapTokens array, verify the sensitive information we had isn't still there.
  if len(decodedCfg.BootstrapTokens) > 0 && decodedCfg.BootstrapTokens[0].Token != nil && decodedCfg.BootstrapTokens[0].Token.String() == cfg.BootstrapTokens[0].Token.String() {
  t.Errorf("Decoded value contains .BootstrapTokens (sensitive info), decoded = %#v, expected = empty", decodedCfg.BootstrapTokens)
  t2.Errorf("Decoded value contains .BootstrapTokens (sensitive info), decoded = %#v, expected = empty", decodedCfg.BootstrapTokens)
  }

  // Make sure no information from NodeRegistrationOptions was uploaded.
  if decodedCfg.NodeRegistration.Name == cfg.NodeRegistration.Name || decodedCfg.NodeRegistration.CRISocket != kubeadmapiv1alpha3.DefaultCRISocket {
  t.Errorf("Decoded value contains .NodeRegistration (node-specific info shouldn't be uploaded), decoded = %#v, expected = empty", decodedCfg.NodeRegistration)
  }

  if decodedExtCfg.Kind != "MasterConfiguration" {
  t.Errorf("Expected kind MasterConfiguration, got %v", decodedExtCfg.Kind)
  }

  if decodedExtCfg.APIVersion != "kubeadm.k8s.io/v1alpha3" {
  t.Errorf("Expected apiVersion kubeadm.k8s.io/v1alpha3, got %v", decodedExtCfg.APIVersion)
  t2.Errorf("Decoded value contains .NodeRegistration (node-specific info shouldn't be uploaded), decoded = %#v, expected = empty", decodedCfg.NodeRegistration)
  }
  }
  })
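The verification block above decodes the uploaded ConfigMap back and asserts that secrets (bootstrap tokens) and node-local registration details were stripped before upload, while the Kubernetes version round-trips intact. A compact sketch of that scrub-then-verify idea with made-up types (the fields and helper are assumptions for illustration only):

```go
package main

import "fmt"

// uploadedConfig is a hypothetical stand-in for the externally stored config.
type uploadedConfig struct {
	KubernetesVersion string
	BootstrapToken    string // sensitive: must not be uploaded
	NodeName          string // node-specific: must not be uploaded
}

// scrubForUpload copies the config and clears everything that must not
// leave the node, mirroring what the upload path is expected to do.
func scrubForUpload(in uploadedConfig) uploadedConfig {
	out := in
	out.BootstrapToken = ""
	out.NodeName = ""
	return out
}

func main() {
	cfg := uploadedConfig{
		KubernetesVersion: "v1.10.3",
		BootstrapToken:    "abcdef.0123456789abcdef",
		NodeName:          "node-1",
	}
	stored := scrubForUpload(cfg)

	// The same kind of assertions the test makes after decoding the ConfigMap.
	if stored.KubernetesVersion != cfg.KubernetesVersion {
		fmt.Println("version was not preserved")
	}
	if stored.BootstrapToken != "" || stored.NodeName != "" {
		fmt.Println("sensitive or node-specific data leaked")
	}
	fmt.Printf("stored: %+v\n", stored)
}
```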
@@ -19,15 +19,14 @@ package config

import (
  "bytes"
  "io/ioutil"
  "reflect"
  "testing"

  "github.com/pmezard/go-difflib/difflib"

  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
  "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
  kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3"
  kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)

const (
@@ -87,7 +86,7 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) {
  // These tests are reading one file that has only a subset of the fields populated, loading it using ConfigFileAndDefaultsToInternalConfig,
  // and then marshals the internal object to the expected groupVersion
  { // v1alpha2 -> default -> validate -> internal -> v1alpha3
  name: "incompleteYAMLToDefaultedv1alpha2",
  name: "incompleteYAMLToDefaultedv1alpha3",
  in: master_incompleteYAML,
  out: master_defaultedYAML,
  groupVersion: kubeadmapiv1alpha3.SchemeGroupVersion,
@@ -110,7 +109,7 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) {
  t2.Fatalf("couldn't unmarshal test data: %v", err)
  }

  actual, err := kubeadmutil.MarshalToYamlForCodecs(internalcfg, rt.groupVersion, scheme.Codecs)
  actual, err := MarshalMasterConfigurationToBytes(internalcfg, rt.groupVersion)
  if err != nil {
  t2.Fatalf("couldn't marshal internal object: %v", err)
  }
@@ -127,3 +126,92 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) {
  })
  }
}

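These round-trip tests load a partially populated YAML file, default and convert it, re-marshal it, and compare the bytes against a golden file; the imports above pull in go-difflib for readable failure output. A small standalone example of producing such a unified diff from expected versus actual text (the file names and strings are placeholders):

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	expected := "kind: MasterConfiguration\nkubernetesVersion: v1.10.0\n"
	actual := "kind: MasterConfiguration\nkubernetesVersion: v1.11.0\n"

	if expected != actual {
		// Render the mismatch as a unified diff, as golden-file tests commonly do.
		diff := difflib.UnifiedDiff{
			A:        difflib.SplitLines(expected),
			B:        difflib.SplitLines(actual),
			FromFile: "expected.yaml", // placeholder name
			ToFile:   "actual.yaml",   // placeholder name
			Context:  3,
		}
		text, err := difflib.GetUnifiedDiffString(diff)
		if err != nil {
			fmt.Println("failed to build diff:", err)
			return
		}
		fmt.Print(text)
	}
}
```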
func TestConsistentOrderByteSlice(t *testing.T) {
  var (
    aKind = "Akind"
    aFile = []byte(`
kind: Akind
apiVersion: foo.k8s.io/v1
`)
    aaKind = "Aakind"
    aaFile = []byte(`
kind: Aakind
apiVersion: foo.k8s.io/v1
`)
    abKind = "Abkind"
    abFile = []byte(`
kind: Abkind
apiVersion: foo.k8s.io/v1
`)
  )
  var tests = []struct {
    name string
    in map[string][]byte
    expected [][]byte
  }{
    {
      name: "a_aa_ab",
      in: map[string][]byte{
        aKind: aFile,
        aaKind: aaFile,
        abKind: abFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
    {
      name: "a_ab_aa",
      in: map[string][]byte{
        aKind: aFile,
        abKind: abFile,
        aaKind: aaFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
    {
      name: "aa_a_ab",
      in: map[string][]byte{
        aaKind: aaFile,
        aKind: aFile,
        abKind: abFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
    {
      name: "aa_ab_a",
      in: map[string][]byte{
        aaKind: aaFile,
        abKind: abFile,
        aKind: aFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
    {
      name: "ab_a_aa",
      in: map[string][]byte{
        abKind: abFile,
        aKind: aFile,
        aaKind: aaFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
    {
      name: "ab_aa_a",
      in: map[string][]byte{
        abKind: abFile,
        aaKind: aaFile,
        aKind: aFile,
      },
      expected: [][]byte{aaFile, abFile, aFile},
    },
  }

  for _, rt := range tests {
    t.Run(rt.name, func(t2 *testing.T) {
      actual := consistentOrderByteSlice(rt.in)
      if !reflect.DeepEqual(rt.expected, actual) {
        t2.Errorf("the expected and actual output differs.\n\texpected: %s\n\tout: %s\n", rt.expected, actual)
      }
    })
  }
}

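The expected slices above are always ordered Aakind, Abkind, Akind regardless of how the input map was built, so the helper under test has to impose a deterministic order on Go's randomized map iteration. A sketch of how such a helper can be written by sorting the keys first (an illustration of the idea, not necessarily the exact kubeadm implementation):

```go
package main

import (
	"fmt"
	"sort"
)

// consistentOrder returns the map values ordered by their keys, so the
// output no longer depends on Go's randomized map iteration order.
func consistentOrder(in map[string][]byte) [][]byte {
	keys := make([]string, 0, len(in))
	for k := range in {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	out := make([][]byte, 0, len(in))
	for _, k := range keys {
		out = append(out, in[k])
	}
	return out
}

func main() {
	in := map[string][]byte{
		"Akind":  []byte("a"),
		"Abkind": []byte("ab"),
		"Aakind": []byte("aa"),
	}
	for _, v := range consistentOrder(in) {
		fmt.Println(string(v)) // aa, ab, a
	}
}
```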
@@ -24,8 +24,9 @@ import (

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
  "k8s.io/apimachinery/pkg/runtime/serializer"
  kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3"
  "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
@@ -125,16 +126,25 @@ func TestMarshalUnmarshalToYamlForCodecs(t *testing.T) {
  ServiceSubnet: "10.100.0.0/24",
  PodSubnet: "10.100.1.0/24",
  },
  BootstrapTokens: []kubeadmapiv1alpha3.BootstrapToken{
    {
      Token: &kubeadmapiv1alpha3.BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"},
    },
  },
  }
  scheme.Scheme.Default(cfg)

  bytes, err := MarshalToYamlForCodecs(cfg, kubeadmapiv1alpha3.SchemeGroupVersion, scheme.Codecs)
  kubeadmapiv1alpha3.SetDefaults_MasterConfiguration(cfg)
  scheme := runtime.NewScheme()
  kubeadmapiv1alpha3.AddToScheme(scheme)
  codecs := serializer.NewCodecFactory(scheme)

  bytes, err := MarshalToYamlForCodecs(cfg, kubeadmapiv1alpha3.SchemeGroupVersion, codecs)
  if err != nil {
  t.Fatalf("unexpected error marshalling MasterConfiguration: %v", err)
  }
  t.Logf("\n%s", bytes)

  obj, err := UnmarshalFromYamlForCodecs(bytes, kubeadmapiv1alpha3.SchemeGroupVersion, scheme.Codecs)
  obj, err := UnmarshalFromYamlForCodecs(bytes, kubeadmapiv1alpha3.SchemeGroupVersion, codecs)
  if err != nil {
  t.Fatalf("unexpected error unmarshalling MasterConfiguration: %v", err)
  }
@@ -148,6 +158,12 @@ func TestMarshalUnmarshalToYamlForCodecs(t *testing.T) {
  }
}

// {{MasterConfiguration kubeadm.k8s.io/v1alpha3} [{<nil> nil <nil> [] []}] {testNode /var/run/cri.sock [] map[]} {10.100.0.1 4332} {0xc4200ad2c0 <nil>} {10.100.0.0/24 10.100.1.0/24 cluster.local} stable-1.11 map[] map[] map[] [] [] [] [] /etc/kubernetes/pki k8s.gcr.io { /var/log/kubernetes/audit 0x156e2f4} map[] kubernetes}
// {{MasterConfiguration kubeadm.k8s.io/v1alpha3} [{<nil> &Duration{Duration:24h0m0s,} <nil> [signing authentication] [system:bootstrappers:kubeadm:default-node-token]}] {testNode /var/run/cri.sock [] map[]} {10.100.0.1 4332} {0xc4205c5260 <nil>} {10.100.0.0/24 10.100.1.0/24 cluster.local} stable-1.11 map[] map[] map[] [] [] [] [] /etc/kubernetes/pki k8s.gcr.io { /var/log/kubernetes/audit 0xc4204dd82c} map[] kubernetes}

// {{MasterConfiguration kubeadm.k8s.io/v1alpha3} [{abcdef.abcdef0123456789 nil <nil> [] []}] {testNode /var/run/cri.sock [] map[]} {10.100.0.1 4332} {0xc42012ca80 <nil>} {10.100.0.0/24 10.100.1.0/24 cluster.local} stable-1.11 map[] map[] map[] [] [] [] [] /etc/kubernetes/pki k8s.gcr.io { /var/log/kubernetes/audit 0x156e2f4} map[] kubernetes}
// {{MasterConfiguration kubeadm.k8s.io/v1alpha3} [{abcdef.abcdef0123456789 &Duration{Duration:24h0m0s,} <nil> [signing authentication] [system:bootstrappers:kubeadm:default-node-token]}] {testNode /var/run/cri.sock [] map[]} {10.100.0.1 4332} {0xc42039d1a0 <nil>} {10.100.0.0/24 10.100.1.0/24 cluster.local} stable-1.11 map[] map[] map[] [] [] [] [] /etc/kubernetes/pki k8s.gcr.io { /var/log/kubernetes/audit 0xc4204fef3c} map[] kubernetes}

func TestSplitYAMLDocuments(t *testing.T) {
  var tests = []struct {
    name string
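TestSplitYAMLDocuments, whose header is the last context above, exercises splitting a multi-document YAML stream on "---" separators and keying each document by its kind. A rough, dependency-free sketch of that splitting step (the real helper presumably decodes the TypeMeta properly; here the kind is just grepped out of the text, and the input is made up):

```go
package main

import (
	"fmt"
	"strings"
)

// splitYAMLDocuments breaks a multi-document YAML stream into a map of
// kind -> document, using the standard "---" separator between documents.
func splitYAMLDocuments(data string) map[string]string {
	out := map[string]string{}
	for _, doc := range strings.Split(data, "\n---") {
		doc = strings.TrimSpace(doc)
		if doc == "" {
			continue
		}
		kind := "Unknown"
		for _, line := range strings.Split(doc, "\n") {
			if strings.HasPrefix(line, "kind:") {
				kind = strings.TrimSpace(strings.TrimPrefix(line, "kind:"))
				break
			}
		}
		out[kind] = doc
	}
	return out
}

func main() {
	stream := "kind: Akind\napiVersion: foo.k8s.io/v1\n---\nkind: Abkind\napiVersion: foo.k8s.io/v1\n"
	for kind := range splitYAMLDocuments(stream) {
		fmt.Println(kind) // Akind, Abkind (map order is random)
	}
}
```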