Merge pull request #50766 from luxas/kubeadm_selfhosting_race_condition

Automatic merge from submit-queue (batch tested with PRs 46458, 50934, 50766, 50970, 47698)

kubeadm: Make the self-hosting with certificates in Secrets mode work again

**What this PR does / why we need it**:

This PR:
 - makes the self-hosting with certificates in Secrets mode work
 - makes the wait functions timeoutable
 - fixes a race condition where the kubelet may be slow to remove the Static Pod
 - cleans up some of the self-hosting logic
 - makes self-hosting-with-secrets respect the feature flag (a rough sketch of the gating and retry pattern follows this list)
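
For reviewers skimming the list above, here is a minimal, self-contained Go sketch of the two patterns this PR combines: appending the Secret-specific PodSpec mutators only when the `StoreCertsInSecrets` feature gate is enabled, and retrying DaemonSet creation up to a failure threshold instead of giving up on the first transient error. Only the helper names (`getDefaultMutators`, `TryRunCommand`, `CreateOrUpdateDaemonSet`) mirror the diff below; the string-typed "PodSpec", the plain retry loop, and everything in `main` are simplified stand-ins, and the real `TryRunCommand` is built on `wait.PollImmediate`.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// PodSpecMutatorFunc stands in for the real mutator type, which operates on a *v1.PodSpec.
type PodSpecMutatorFunc func(spec *string)

// getDefaultMutators returns the mutators that are always applied; heavily simplified here.
func getDefaultMutators() map[string][]PodSpecMutatorFunc {
	return map[string][]PodSpecMutatorFunc{
		"kube-apiserver": {
			func(spec *string) { *spec += " +node-selector +toleration +dns-policy" },
		},
	}
}

// tryRunCommand retries f up to failureThreshold times with a short pause between
// attempts and returns the last error if the threshold is hit.
func tryRunCommand(f func() error, failureThreshold uint8) error {
	var lastErr error
	for i := uint8(0); i < failureThreshold; i++ {
		if lastErr = f(); lastErr == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond) // the real helper polls every 5 seconds
	}
	return lastErr
}

func main() {
	// Stand-in for features.Enabled(cfg.FeatureFlags, features.StoreCertsInSecrets).
	storeCertsInSecrets := true

	mutators := getDefaultMutators()
	if storeCertsInSecrets {
		// Only when the feature gate is on are the Secret-backed volume mutators appended.
		mutators["kube-apiserver"] = append(mutators["kube-apiserver"],
			func(spec *string) { *spec += " +secret-volumes" })
	}

	spec := "kube-apiserver podspec"
	for _, mutate := range mutators["kube-apiserver"] {
		mutate(&spec)
	}
	fmt.Println("mutated spec:", spec)

	// Stand-in for apiclient.CreateOrUpdateDaemonSet(client, ds) wrapped in a retry.
	attempts := 0
	err := tryRunCommand(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("apiserver not ready yet")
		}
		return nil
	}, 5)
	fmt.Printf("daemonset creation after %d attempts: err=%v\n", attempts, err)
}
```

In the actual change these pieces live in `CreateSelfHostedControlPlane` and `apiclient.TryRunCommand`, shown in the diff below.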

**Which issue this PR fixes**: fixes https://github.com/kubernetes/kubeadm/issues/405

**Special notes for your reviewer**:

This is a work in progress. I'll add unit tests, rebase on https://github.com/kubernetes/kubernetes/pull/50762, and maybe split out some of the functionality here into a separate PR.

**Release note**:

```release-note
NONE
```
@kubernetes/sig-cluster-lifecycle-pr-reviews
Kubernetes Submit Queue 2017-08-21 18:11:22 -07:00 committed by GitHub
commit b49a179ea4
17 changed files with 1117 additions and 505 deletions

View File

@ -113,6 +113,12 @@ const (
// SelfHostingPrefix describes the prefix that workloads self-hosted by kubeadm have
SelfHostingPrefix = "self-hosted-"
// KubeCertificatesVolumeName specifies the name for the Volume that is used for injecting certificates into control plane components (can be either a hostPath volume or a projected, all-in-one volume)
KubeCertificatesVolumeName = "k8s-certs"
// KubeConfigVolumeName specifies the name for the Volume that is used for injecting the kubeconfig a control plane component uses to talk securely to the API server, if applicable
KubeConfigVolumeName = "kubeconfig"
// NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in
// TODO: This should be changed in the v1.8 dev cycle to a node-BT-specific group instead of the generic Bootstrap Token group that is used now
NodeBootstrapTokenAuthGroup = "system:bootstrappers"

View File

@ -33,6 +33,7 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//pkg/util/version:go_default_library",

View File

@ -28,6 +28,7 @@ import (
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/util/version"
@ -159,7 +160,7 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion *versio
}
command := []string{"kube-apiserver"}
command = append(command, staticpodutil.GetExtraParameters(cfg.APIServerExtraArgs, defaultArguments)...)
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.APIServerExtraArgs)...)
command = append(command, getAuthzParameters(cfg.AuthorizationModes)...)
// Check if the user decided to use an external etcd cluster
@ -206,7 +207,7 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, k8sVersion
}
command := []string{"kube-controller-manager"}
command = append(command, staticpodutil.GetExtraParameters(cfg.ControllerManagerExtraArgs, defaultArguments)...)
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.ControllerManagerExtraArgs)...)
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
@ -234,7 +235,7 @@ func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
}
command := []string{"kube-scheduler"}
command = append(command, staticpodutil.GetExtraParameters(cfg.SchedulerExtraArgs, defaultArguments)...)
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.SchedulerExtraArgs)...)
return command
}

View File

@ -30,11 +30,9 @@ import (
)
const (
k8sCertsVolumeName = "k8s-certs"
caCertsVolumeName = "ca-certs"
caCertsVolumePath = "/etc/ssl/certs"
caCertsPkiVolumeName = "ca-certs-etc-pki"
kubeConfigVolumeName = "kubeconfig"
)
// caCertsPkiVolumePath specifies the path that can be conditionally mounted into the apiserver and controller-manager containers
@ -49,7 +47,7 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
// HostPath volumes for the API Server
// Read-only mount for the certificates directory
// TODO: Always mount the K8s Certificates directory to a static path inside of the container
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, k8sCertsVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true)
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeCertificatesVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true)
// Read-only mount for the ca certs (/etc/ssl/certs) directory
mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, caCertsVolumeName, caCertsVolumePath, caCertsVolumePath, true)
@ -62,17 +60,17 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c
// HostPath volumes for the controller manager
// Read-only mount for the certificates directory
// TODO: Always mount the K8s Certificates directory to a static path inside of the container
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, k8sCertsVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeCertificatesVolumeName, cfg.CertificatesDir, cfg.CertificatesDir, true)
// Read-only mount for the ca certs (/etc/ssl/certs) directory
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, caCertsVolumeName, caCertsVolumePath, caCertsVolumePath, true)
// Read-only mount for the controller manager kubeconfig file
controllerManagerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeConfigVolumeName, controllerManagerKubeConfigFile, controllerManagerKubeConfigFile, true)
mounts.NewHostPathMount(kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeConfigVolumeName, controllerManagerKubeConfigFile, controllerManagerKubeConfigFile, true)
// HostPath volumes for the scheduler
// Read-only mount for the scheduler kubeconfig file
schedulerKubeConfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName)
mounts.NewHostPathMount(kubeadmconstants.KubeScheduler, kubeConfigVolumeName, schedulerKubeConfigFile, schedulerKubeConfigFile, true)
mounts.NewHostPathMount(kubeadmconstants.KubeScheduler, kubeadmconstants.KubeConfigVolumeName, schedulerKubeConfigFile, schedulerKubeConfigFile, true)
// On some systems where we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
// due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/

View File

@ -28,6 +28,7 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],

View File

@ -21,6 +21,7 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)
@ -60,6 +61,6 @@ func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string {
}
command := []string{"etcd"}
command = append(command, staticpodutil.GetExtraParameters(cfg.Etcd.ExtraArgs, defaultArguments)...)
command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.ExtraArgs)...)
return command
}

View File

@ -15,8 +15,6 @@ go_test(
],
library = ":go_default_library",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/cmd/features:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@ -34,6 +32,7 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/cmd/features:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//pkg/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

View File

@ -17,43 +17,55 @@ limitations under the License.
package selfhosting
import (
"path/filepath"
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
// mutatePodSpec makes a Static Pod-hosted PodSpec suitable for self-hosting
func mutatePodSpec(cfg *kubeadmapi.MasterConfiguration, name string, podSpec *v1.PodSpec) {
mutators := map[string][]func(*kubeadmapi.MasterConfiguration, *v1.PodSpec){
const (
// selfHostedKubeConfigDir sets the directory where kubeconfig files for the scheduler and controller-manager should be mounted
// Due to how the projected volume mount works (it can only mount a full directory, not individual files), we must change this from
// the default, as mounts cannot be nested (/etc/kubernetes would override /etc/kubernetes/pki)
selfHostedKubeConfigDir = "/etc/kubernetes/kubeconfig"
)
// PodSpecMutatorFunc is a function capable of mutating a PodSpec
type PodSpecMutatorFunc func(*v1.PodSpec)
// getDefaultMutators gets the mutator functions that should always be used
func getDefaultMutators() map[string][]PodSpecMutatorFunc {
return map[string][]PodSpecMutatorFunc{
kubeadmconstants.KubeAPIServer: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
setVolumesOnKubeAPIServerPodSpec,
},
kubeadmconstants.KubeControllerManager: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
setVolumesOnKubeControllerManagerPodSpec,
},
kubeadmconstants.KubeScheduler: {
addNodeSelectorToPodSpec,
setMasterTolerationOnPodSpec,
setRightDNSPolicyOnPodSpec,
setVolumesOnKubeSchedulerPodSpec,
},
}
}
// mutatePodSpec makes a Static Pod-hosted PodSpec suitable for self-hosting
func mutatePodSpec(mutators map[string][]PodSpecMutatorFunc, name string, podSpec *v1.PodSpec) {
// Get the mutator functions for the component in question, then loop through and execute them
mutatorsForComponent := mutators[name]
for _, mutateFunc := range mutatorsForComponent {
mutateFunc(cfg, podSpec)
mutateFunc(podSpec)
}
}
// addNodeSelectorToPodSpec makes the Pod require scheduling on a node marked with the master label
func addNodeSelectorToPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
func addNodeSelectorToPodSpec(podSpec *v1.PodSpec) {
if podSpec.NodeSelector == nil {
podSpec.NodeSelector = map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}
return
@ -63,7 +75,7 @@ func addNodeSelectorToPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.P
}
// setMasterTolerationOnPodSpec makes the Pod tolerate the master taint
func setMasterTolerationOnPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) {
if podSpec.Tolerations == nil {
podSpec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
return
@ -73,38 +85,68 @@ func setMasterTolerationOnPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *
}
// setRightDNSPolicyOnPodSpec makes sure the self-hosted components can look up things via kube-dns if necessary
func setRightDNSPolicyOnPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
func setRightDNSPolicyOnPodSpec(podSpec *v1.PodSpec) {
podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet
}
// setVolumesOnKubeAPIServerPodSpec makes sure the self-hosted api server has the required files
func setVolumesOnKubeAPIServerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
setK8sVolume(apiServerVolume, cfg, podSpec)
for _, c := range podSpec.Containers {
c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount())
}
}
// setVolumesOnKubeControllerManagerPodSpec makes sure the self-hosted controller manager has the required files
func setVolumesOnKubeControllerManagerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
setK8sVolume(controllerManagerVolume, cfg, podSpec)
for _, c := range podSpec.Containers {
c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount())
}
}
// setVolumesOnKubeSchedulerPodSpec makes sure the self-hosted scheduler has the required files
func setVolumesOnKubeSchedulerPodSpec(cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
setK8sVolume(schedulerVolume, cfg, podSpec)
for _, c := range podSpec.Containers {
c.VolumeMounts = append(c.VolumeMounts, k8sSelfHostedVolumeMount())
}
}
func setK8sVolume(cb func(cfg *kubeadmapi.MasterConfiguration) v1.Volume, cfg *kubeadmapi.MasterConfiguration, podSpec *v1.PodSpec) {
// setSelfHostedVolumesForAPIServer makes sure the self-hosted api server has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForAPIServer(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
if v.Name == "k8s" {
podSpec.Volumes[i] = cb(cfg)
// If the volume name matches the expected one, switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeCertificatesVolumeName {
podSpec.Volumes[i].VolumeSource = apiServerCertificatesVolumeSource()
}
}
}
// setSelfHostedVolumesForControllerManager makes sure the self-hosted controller manager has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForControllerManager(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
// If the volume name matches the expected one, switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeCertificatesVolumeName {
podSpec.Volumes[i].VolumeSource = controllerManagerCertificatesVolumeSource()
} else if v.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Volumes[i].VolumeSource = kubeConfigVolumeSource(kubeadmconstants.ControllerManagerKubeConfigFileName)
}
}
// Change directory for the kubeconfig directory to selfHostedKubeConfigDir
for i, vm := range podSpec.Containers[0].VolumeMounts {
if vm.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Containers[0].VolumeMounts[i].MountPath = selfHostedKubeConfigDir
}
}
// Rewrite the --kubeconfig path, as the kubeconfig volume mount path must not overlap with the certs dir, which it would by default (/etc/kubernetes and /etc/kubernetes/pki).
// This is not a problem with hostPath mounts, as hostPath can mount a single file instead of a full directory; Secrets and Projected Volumes
// don't support that.
podSpec.Containers[0].Command = kubeadmutil.ReplaceArgument(podSpec.Containers[0].Command, func(argMap map[string]string) map[string]string {
argMap["kubeconfig"] = filepath.Join(selfHostedKubeConfigDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
return argMap
})
}
// setSelfHostedVolumesForScheduler makes sure the self-hosted scheduler has the right volume source coming from a self-hosted cluster
func setSelfHostedVolumesForScheduler(podSpec *v1.PodSpec) {
for i, v := range podSpec.Volumes {
// If the volume name matches the expected one, switch the volume source from hostPath to cluster-hosted
if v.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Volumes[i].VolumeSource = kubeConfigVolumeSource(kubeadmconstants.SchedulerKubeConfigFileName)
}
}
// Change directory for the kubeconfig directory to selfHostedKubeConfigDir
for i, vm := range podSpec.Containers[0].VolumeMounts {
if vm.Name == kubeadmconstants.KubeConfigVolumeName {
podSpec.Containers[0].VolumeMounts[i].MountPath = selfHostedKubeConfigDir
}
}
// Rewrite the --kubeconfig path, as the kubeconfig volume mount path must not overlap with the certs dir, which it would by default (/etc/kubernetes and /etc/kubernetes/pki).
// This is not a problem with hostPath mounts, as hostPath can mount a single file instead of a full directory; Secrets and Projected Volumes
// don't support that.
podSpec.Containers[0].Command = kubeadmutil.ReplaceArgument(podSpec.Containers[0].Command, func(argMap map[string]string) map[string]string {
argMap["kubeconfig"] = filepath.Join(selfHostedKubeConfigDir, kubeadmconstants.SchedulerKubeConfigFileName)
return argMap
})
}

View File

@ -18,10 +18,10 @@ package selfhosting
import (
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
@ -72,9 +72,8 @@ func TestMutatePodSpec(t *testing.T) {
},
}
cfg := &kubeadmapi.MasterConfiguration{}
for _, rt := range tests {
mutatePodSpec(cfg, rt.component, rt.podSpec)
mutatePodSpec(getDefaultMutators(), rt.component, rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed mutatePodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
@ -110,9 +109,8 @@ func TestAddNodeSelectorToPodSpec(t *testing.T) {
},
}
cfg := &kubeadmapi.MasterConfiguration{}
for _, rt := range tests {
addNodeSelectorToPodSpec(cfg, rt.podSpec)
addNodeSelectorToPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed addNodeSelectorToPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
@ -148,9 +146,8 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
},
}
cfg := &kubeadmapi.MasterConfiguration{}
for _, rt := range tests {
setMasterTolerationOnPodSpec(cfg, rt.podSpec)
setMasterTolerationOnPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setMasterTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
@ -179,12 +176,281 @@ func TestSetRightDNSPolicyOnPodSpec(t *testing.T) {
},
}
cfg := &kubeadmapi.MasterConfiguration{}
for _, rt := range tests {
setRightDNSPolicyOnPodSpec(cfg, rt.podSpec)
setRightDNSPolicyOnPodSpec(rt.podSpec)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setRightDNSPolicyOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForAPIServer(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
},
Command: []string{
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
},
},
},
{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/pki",
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
},
Command: []string{
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
},
},
},
{
Name: "k8s-certs",
VolumeSource: apiServerCertificatesVolumeSource(),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForAPIServer(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForAPIServer:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForControllerManager(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/controller-manager.conf",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/controller-manager.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
},
},
},
{
Name: "k8s-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/pki",
},
},
},
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/controller-manager.conf",
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "ca-certs",
MountPath: "/etc/ssl/certs",
},
{
Name: "k8s-certs",
MountPath: "/etc/kubernetes/pki",
},
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/kubeconfig",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/kubeconfig/controller-manager.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "ca-certs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/ssl/certs",
},
},
},
{
Name: "k8s-certs",
VolumeSource: controllerManagerCertificatesVolumeSource(),
},
{
Name: "kubeconfig",
VolumeSource: kubeConfigVolumeSource(kubeadmconstants.ControllerManagerKubeConfigFileName),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForControllerManager(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForControllerManager:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}
func TestSetSelfHostedVolumesForScheduler(t *testing.T) {
var tests = []struct {
podSpec *v1.PodSpec
expected v1.PodSpec
}{
{
podSpec: &v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/scheduler.conf",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/scheduler.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/kubernetes/scheduler.conf",
},
},
},
},
},
expected: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/kubeconfig",
},
},
Command: []string{
"--kubeconfig=/etc/kubernetes/kubeconfig/scheduler.conf",
"--foo=bar",
},
},
},
Volumes: []v1.Volume{
{
Name: "kubeconfig",
VolumeSource: kubeConfigVolumeSource(kubeadmconstants.SchedulerKubeConfigFileName),
},
},
},
},
}
for _, rt := range tests {
setSelfHostedVolumesForScheduler(rt.podSpec)
sort.Strings(rt.podSpec.Containers[0].Command)
sort.Strings(rt.expected.Containers[0].Command)
if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
t.Errorf("failed setSelfHostedVolumesForScheduler:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
}
}
}

View File

@ -37,6 +37,9 @@ import (
const (
// selfHostingWaitTimeout describes the maximum amount of time a self-hosting wait process should wait before timing out
selfHostingWaitTimeout = 2 * time.Minute
// selfHostingFailureThreshold describes how many times kubeadm will retry creating the DaemonSets
selfHostingFailureThreshold uint8 = 5
)
// CreateSelfHostedControlPlane is responsible for turning a Static Pod-hosted control plane to a self-hosted one
@ -53,13 +56,23 @@ const (
// 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
// Here the map of different mutators to use for the control plane's podspec is stored
mutators := getDefaultMutators()
// Some extra work to be done if we should store the control plane certificates in Secrets
if features.Enabled(cfg.FeatureFlags, features.StoreCertsInSecrets) {
if err := createTLSSecrets(cfg, client); err != nil {
// Upload the certificates and kubeconfig files from disk to the cluster as Secrets
if err := uploadTLSSecrets(client, cfg.CertificatesDir); err != nil {
return err
}
if err := createOpaqueSecrets(cfg, client); err != nil {
if err := uploadKubeConfigSecrets(client); err != nil {
return err
}
// Add the store-certs-in-secrets-specific mutators here so that the self-hosted component starts using them
mutators[kubeadmconstants.KubeAPIServer] = append(mutators[kubeadmconstants.KubeAPIServer], setSelfHostedVolumesForAPIServer)
mutators[kubeadmconstants.KubeControllerManager] = append(mutators[kubeadmconstants.KubeControllerManager], setSelfHostedVolumesForControllerManager)
mutators[kubeadmconstants.KubeScheduler] = append(mutators[kubeadmconstants.KubeScheduler], setSelfHostedVolumesForScheduler)
}
for _, componentName := range kubeadmconstants.MasterComponents {
@ -79,10 +92,12 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
}
// Build a DaemonSet object from the loaded PodSpec
ds := buildDaemonSet(cfg, componentName, podSpec)
ds := buildDaemonSet(componentName, podSpec, mutators)
// Create the DaemonSet in the API Server
if err := apiclient.CreateOrUpdateDaemonSet(client, ds); err != nil {
// Create or update the DaemonSet in the API Server, and retry selfHostingFailureThreshold times if it errors out
if err := apiclient.TryRunCommand(func() error {
return apiclient.CreateOrUpdateDaemonSet(client, ds)
}, selfHostingFailureThreshold); err != nil {
return err
}
@ -115,10 +130,10 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
}
// buildDaemonSet is responsible for mutating the PodSpec and returning a DaemonSet that is suitable for the self-hosting purpose
func buildDaemonSet(cfg *kubeadmapi.MasterConfiguration, name string, podSpec *v1.PodSpec) *extensions.DaemonSet {
func buildDaemonSet(name string, podSpec *v1.PodSpec, mutators map[string][]PodSpecMutatorFunc) *extensions.DaemonSet {
// Mutate the PodSpec so it's suitable for self-hosting
mutatePodSpec(cfg, name, podSpec)
mutatePodSpec(mutators, name, podSpec)
// Return a DaemonSet based on that Spec
return &extensions.DaemonSet{

View File

@ -21,88 +21,13 @@ import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/ghodss/yaml"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/features"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
const (
apiProjectedSecret = `- name: k8s
projected:
sources:
- secret:
items:
- key: tls.crt
path: ca.crt
- key: tls.key
path: ca.key
name: ca
- secret:
items:
- key: tls.crt
path: apiserver.crt
- key: tls.key
path: apiserver.key
name: apiserver
- secret:
items:
- key: tls.crt
path: apiserver-kubelet-client.crt
- key: tls.key
path: apiserver-kubelet-client.key
name: apiserver-kubelet-client
- secret:
items:
- key: tls.crt
path: sa.pub
- key: tls.key
path: sa.key
name: sa
- secret:
items:
- key: tls.crt
path: front-proxy-ca.crt
name: front-proxy-ca
- secret:
items:
- key: tls.crt
path: front-proxy-client.crt
- key: tls.key
path: front-proxy-client.key
name: front-proxy-client`
controllerManagerProjectedSecret = `- name: k8s
projected:
sources:
- secret:
name: controller-manager.conf
- secret:
items:
- key: tls.crt
path: ca.crt
- key: tls.key
path: ca.key
name: ca
- secret:
items:
- key: tls.key
path: sa.key
name: sa`
schedulerProjectedSecret = `- name: k8s
projected:
sources:
- secret:
name: scheduler.conf`
hostPathVol = `- hostPath:
path: /etc/kubernetes
name: k8s`
testAPIServerPod = `
apiVersion: v1
kind: Pod
@ -110,39 +35,36 @@ metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --allow-privileged=true
- --service-cluster-ip-range=10.96.0.0/12
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --secure-port=6443
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --requestheader-group-headers=X-Remote-Group
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --secure-port=6443
- --insecure-port=0
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --experimental-bootstrap-token-auth=true
- --requestheader-group-headers=X-Remote-Group
- --requestheader-allowed-names=front-proxy-client
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --advertise-address=192.168.1.115
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --insecure-port=0
- --experimental-bootstrap-token-auth=true
- --requestheader-username-headers=X-Remote-User
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-allowed-names=front-proxy-client
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --advertise-address=192.168.200.101
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.0
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -157,22 +79,26 @@ spec:
requests:
cpu: 250m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: certs
name: ca-certs
readOnly: true
- mountPath: /etc/pki
name: pki
name: ca-certs-etc-pki
readOnly: true
hostNetwork: true
volumes:
%s
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: certs
name: ca-certs
- hostPath:
path: /etc/pki
name: pki
name: ca-certs-etc-pki
status: {}
`
@ -192,30 +118,30 @@ spec:
containers:
- command:
- kube-apiserver
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --allow-privileged=true
- --service-cluster-ip-range=10.96.0.0/12
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --secure-port=6443
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --requestheader-group-headers=X-Remote-Group
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --secure-port=6443
- --insecure-port=0
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --experimental-bootstrap-token-auth=true
- --requestheader-group-headers=X-Remote-Group
- --requestheader-allowed-names=front-proxy-client
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --advertise-address=192.168.1.115
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --insecure-port=0
- --experimental-bootstrap-token-auth=true
- --requestheader-username-headers=X-Remote-User
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-allowed-names=front-proxy-client
- --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --authorization-mode=Node,RBAC
- --advertise-address=192.168.200.101
- --etcd-servers=http://127.0.0.1:2379
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.0
image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -230,13 +156,15 @@ spec:
requests:
cpu: 250m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: certs
name: ca-certs
readOnly: true
- mountPath: /etc/pki
name: pki
name: ca-certs-etc-pki
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
@ -245,13 +173,15 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
%s
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: certs
name: ca-certs
- hostPath:
path: /etc/pki
name: pki
name: ca-certs-etc-pki
updateStrategy: {}
status:
currentNumberScheduled: 0
@ -267,25 +197,22 @@ metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
component: kube-controller-manager
tier: control-plane
name: kube-controller-manager
namespace: kube-system
spec:
containers:
- command:
- kube-controller-manager
- --leader-elect=true
- --controllers=*,bootstrapsigner,tokencleaner
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --controllers=*,bootstrapsigner,tokencleaner
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.0
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -300,22 +227,32 @@ spec:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: certs
name: ca-certs
readOnly: true
- mountPath: /etc/kubernetes/controller-manager.conf
name: kubeconfig
readOnly: true
- mountPath: /etc/pki
name: pki
name: ca-certs-etc-pki
readOnly: true
hostNetwork: true
volumes:
%s
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: certs
name: ca-certs
- hostPath:
path: /etc/kubernetes/controller-manager.conf
name: kubeconfig
- hostPath:
path: /etc/pki
name: pki
name: ca-certs-etc-pki
status: {}
`
@ -335,16 +272,16 @@ spec:
containers:
- command:
- kube-controller-manager
- --leader-elect=true
- --controllers=*,bootstrapsigner,tokencleaner
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --controllers=*,bootstrapsigner,tokencleaner
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --address=127.0.0.1
- --use-service-account-credentials=true
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.0
image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -359,13 +296,18 @@ spec:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /etc/ssl/certs
name: certs
name: ca-certs
readOnly: true
- mountPath: /etc/kubernetes/controller-manager.conf
name: kubeconfig
readOnly: true
- mountPath: /etc/pki
name: pki
name: ca-certs-etc-pki
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
nodeSelector:
@ -374,13 +316,18 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
%s
- hostPath:
path: /etc/kubernetes/pki
name: k8s-certs
- hostPath:
path: /etc/ssl/certs
name: certs
name: ca-certs
- hostPath:
path: /etc/kubernetes/controller-manager.conf
name: kubeconfig
- hostPath:
path: /etc/pki
name: pki
name: ca-certs-etc-pki
updateStrategy: {}
status:
currentNumberScheduled: 0
@ -396,19 +343,16 @@ metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
component: kube-scheduler
tier: control-plane
name: kube-scheduler
namespace: kube-system
spec:
containers:
- command:
- kube-scheduler
- --address=127.0.0.1
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.0
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -423,12 +367,14 @@ spec:
requests:
cpu: 100m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/scheduler.conf
name: kubeconfig
readOnly: true
hostNetwork: true
volumes:
%s
- hostPath:
path: /etc/kubernetes/scheduler.conf
name: kubeconfig
status: {}
`
@ -448,10 +394,10 @@ spec:
containers:
- command:
- kube-scheduler
- --address=127.0.0.1
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/scheduler.conf
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.0
- --address=127.0.0.1
image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
livenessProbe:
failureThreshold: 8
httpGet:
@ -466,8 +412,8 @@ spec:
requests:
cpu: 100m
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
- mountPath: /etc/kubernetes/scheduler.conf
name: kubeconfig
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
@ -477,7 +423,9 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
%s
- hostPath:
path: /etc/kubernetes/scheduler.conf
name: kubeconfig
updateStrategy: {}
status:
currentNumberScheduled: 0
@ -487,67 +435,26 @@ status:
`
)
var (
testAPIServerSecretsPod = fmt.Sprintf(testAPIServerPod, apiProjectedSecret)
testAPIServerSecretsDS = fmt.Sprintf(testAPIServerDaemonSet, indentString(apiProjectedSecret, 4))
testAPIServerHostPathPod = fmt.Sprintf(testAPIServerPod, hostPathVol)
testAPIServerHostPathDS = fmt.Sprintf(testAPIServerDaemonSet, indentString(hostPathVol, 4))
testSchedulerSecretsPod = fmt.Sprintf(testSchedulerPod, schedulerProjectedSecret)
testSchedulerSecretsDS = fmt.Sprintf(testSchedulerDaemonSet, indentString(schedulerProjectedSecret, 4))
testSchedulerHostPathPod = fmt.Sprintf(testSchedulerPod, hostPathVol)
testSchedulerHostPathDS = fmt.Sprintf(testSchedulerDaemonSet, indentString(hostPathVol, 4))
testControllerManagerSecretsPod = fmt.Sprintf(testControllerManagerPod, controllerManagerProjectedSecret)
testControllerManagerSecretsDS = fmt.Sprintf(testControllerManagerDaemonSet, indentString(controllerManagerProjectedSecret, 4))
testControllerManagerHostPathPod = fmt.Sprintf(testControllerManagerPod, hostPathVol)
testControllerManagerHostPathDS = fmt.Sprintf(testControllerManagerDaemonSet, indentString(hostPathVol, 4))
)
func TestBuildDaemonSet(t *testing.T) {
var tests = []struct {
component string
podBytes []byte
dsBytes []byte
selfHostedSecrets bool
}{
// vols as secrets
{
component: kubeadmconstants.KubeAPIServer,
podBytes: []byte(testAPIServerSecretsPod),
dsBytes: []byte(testAPIServerSecretsDS),
selfHostedSecrets: true,
component: constants.KubeAPIServer,
podBytes: []byte(testAPIServerPod),
dsBytes: []byte(testAPIServerDaemonSet),
},
{
component: kubeadmconstants.KubeControllerManager,
podBytes: []byte(testControllerManagerSecretsPod),
dsBytes: []byte(testControllerManagerSecretsDS),
selfHostedSecrets: true,
component: constants.KubeControllerManager,
podBytes: []byte(testControllerManagerPod),
dsBytes: []byte(testControllerManagerDaemonSet),
},
{
component: kubeadmconstants.KubeScheduler,
podBytes: []byte(testSchedulerSecretsPod),
dsBytes: []byte(testSchedulerSecretsDS),
selfHostedSecrets: true,
},
// hostPath vols
{
component: kubeadmconstants.KubeAPIServer,
podBytes: []byte(testAPIServerHostPathPod),
dsBytes: []byte(testAPIServerHostPathDS),
selfHostedSecrets: false,
},
{
component: kubeadmconstants.KubeControllerManager,
podBytes: []byte(testControllerManagerHostPathPod),
dsBytes: []byte(testControllerManagerHostPathDS),
selfHostedSecrets: false,
},
{
component: kubeadmconstants.KubeScheduler,
podBytes: []byte(testSchedulerHostPathPod),
dsBytes: []byte(testSchedulerHostPathDS),
selfHostedSecrets: false,
component: constants.KubeScheduler,
podBytes: []byte(testSchedulerPod),
dsBytes: []byte(testSchedulerDaemonSet),
},
}
@ -557,21 +464,17 @@ func TestBuildDaemonSet(t *testing.T) {
podSpec, err := loadPodSpecFromFile(tempFile)
if err != nil {
t.Fatalf("couldn't load the specified Pod: %v", err)
t.Fatalf("couldn't load the specified Pod")
}
cfg := &kubeadmapi.MasterConfiguration{
FeatureFlags: map[string]bool{string(features.StoreCertsInSecrets): rt.selfHostedSecrets},
}
ds := buildDaemonSet(cfg, rt.component, podSpec)
ds := buildDaemonSet(rt.component, podSpec, getDefaultMutators())
dsBytes, err := yaml.Marshal(ds)
if err != nil {
t.Fatalf("failed to marshal daemonset to YAML: %v", err)
}
if !bytes.Equal(dsBytes, rt.dsBytes) {
t.Errorf("failed TestBuildDaemonSet for name=%s (secrets=%t):\nexpected:\n%s\nsaw:\n%s", rt.component, rt.selfHostedSecrets, rt.dsBytes, dsBytes)
t.Errorf("failed TestBuildDaemonSet:\nexpected:\n%s\nsaw:\n%s", rt.dsBytes, dsBytes)
}
}
}
@ -651,18 +554,3 @@ func createTempFileWithContent(content []byte) (string, error) {
}
return tempFile.Name(), nil
}
func indentString(input string, count int) string {
output := ""
lines := strings.Split(input, "\n")
for i, line := range lines {
if i > 0 {
output += strings.Repeat(" ", count)
}
output += line
if i < len(lines)-1 {
output += "\n"
}
}
return output
}

View File

@ -24,14 +24,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/features"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
const (
volumeName = "k8s"
volumeMountName = "k8s"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
type tlsKeyPair struct {
@ -40,18 +34,8 @@ type tlsKeyPair struct {
key string
}
func k8sSelfHostedVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: volumeMountName,
MountPath: kubeadmconstants.KubernetesDir,
ReadOnly: true,
}
}
func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
var volumeSource v1.VolumeSource
if features.Enabled(cfg.FeatureFlags, features.StoreCertsInSecrets) {
volumeSource = v1.VolumeSource{
func apiServerCertificatesVolumeSource() v1.VolumeSource {
return v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
@ -62,11 +46,7 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CACertName),
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CAKeyName),
Path: kubeadmconstants.CACertName,
},
},
},
@ -79,11 +59,11 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerCertName),
Path: kubeadmconstants.APIServerCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKeyName),
Path: kubeadmconstants.APIServerKeyName,
},
},
},
@ -96,11 +76,11 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKubeletClientCertName),
Path: kubeadmconstants.APIServerKubeletClientCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.APIServerKubeletClientKeyName),
Path: kubeadmconstants.APIServerKubeletClientKeyName,
},
},
},
@ -113,11 +93,7 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPublicKeyName),
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPrivateKeyName),
Path: kubeadmconstants.ServiceAccountPublicKeyName,
},
},
},
@ -130,7 +106,7 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyCACertName),
Path: kubeadmconstants.FrontProxyCACertName,
},
},
},
@ -143,11 +119,11 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyClientCertName),
Path: kubeadmconstants.FrontProxyClientCertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.FrontProxyClientKeyName),
Path: kubeadmconstants.FrontProxyClientKeyName,
},
},
},
@ -155,61 +131,12 @@ func apiServerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
},
},
}
} else {
volumeSource = v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: kubeadmconstants.KubernetesDir,
},
}
}
return v1.Volume{
Name: volumeName,
VolumeSource: volumeSource,
}
}
func schedulerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
var volumeSource v1.VolumeSource
if features.Enabled(cfg.FeatureFlags, features.StoreCertsInSecrets) {
volumeSource = v1.VolumeSource{
func controllerManagerCertificatesVolumeSource() v1.VolumeSource {
return v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.SchedulerKubeConfigFileName,
},
},
},
},
},
}
} else {
volumeSource = v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: kubeadmconstants.KubernetesDir,
},
}
}
return v1.Volume{
Name: volumeName,
VolumeSource: volumeSource,
}
}
func controllerManagerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
var volumeSource v1.VolumeSource
if features.Enabled(cfg.FeatureFlags, features.StoreCertsInSecrets) {
volumeSource = v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: kubeadmconstants.ControllerManagerKubeConfigFileName,
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
@ -218,11 +145,11 @@ func controllerManagerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSCertKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CACertName),
Path: kubeadmconstants.CACertName,
},
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.CAKeyName),
Path: kubeadmconstants.CAKeyName,
},
},
},
@ -235,7 +162,7 @@ func controllerManagerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
Items: []v1.KeyToPath{
{
Key: v1.TLSPrivateKeyKey,
Path: path.Join(path.Base(cfg.CertificatesDir), kubeadmconstants.ServiceAccountPrivateKeyName),
Path: kubeadmconstants.ServiceAccountPrivateKeyName,
},
},
},
@ -243,31 +170,28 @@ func controllerManagerVolume(cfg *kubeadmapi.MasterConfiguration) v1.Volume {
},
},
}
} else {
volumeSource = v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: kubeadmconstants.KubernetesDir,
},
}
}
return v1.Volume{
Name: volumeName,
VolumeSource: volumeSource,
}
}
func createTLSSecrets(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
func kubeConfigVolumeSource(kubeconfigSecretName string) v1.VolumeSource {
return v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: kubeconfigSecretName,
},
}
}
func uploadTLSSecrets(client clientset.Interface, certDir string) error {
for _, tlsKeyPair := range getTLSKeyPairs() {
secret, err := createTLSSecretFromFiles(
tlsKeyPair.name,
path.Join(cfg.CertificatesDir, tlsKeyPair.cert),
path.Join(cfg.CertificatesDir, tlsKeyPair.key),
path.Join(certDir, tlsKeyPair.cert),
path.Join(certDir, tlsKeyPair.key),
)
if err != nil {
return err
}
if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil {
if err := apiclient.CreateOrUpdateSecret(client, secret); err != nil {
return err
}
fmt.Printf("[self-hosted] Created TLS secret %q from %s and %s\n", tlsKeyPair.name, tlsKeyPair.cert, tlsKeyPair.key)
@ -276,24 +200,22 @@ func createTLSSecrets(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte
return nil
}
func createOpaqueSecrets(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
func uploadKubeConfigSecrets(client clientset.Interface) error {
files := []string{
kubeadmconstants.SchedulerKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
}
for _, file := range files {
secret, err := createOpaqueSecretFromFile(
file,
path.Join(kubeadmconstants.KubernetesDir, file),
)
kubeConfigPath := path.Join(kubeadmconstants.KubernetesDir, file)
secret, err := createOpaqueSecretFromFile(file, kubeConfigPath)
if err != nil {
return err
}
if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil {
if err := apiclient.CreateOrUpdateSecret(client, secret); err != nil {
return err
}
fmt.Printf("[self-hosted] Created secret %q\n", file)
fmt.Printf("[self-hosted] Created secret for kubeconfig file %q\n", file)
}
return nil

View File

@ -9,6 +9,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"arguments.go",
"endpoint.go",
"error.go",
"template.go",
@ -24,6 +25,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"arguments_test.go",
"endpoint_test.go",
"error_test.go",
"template_test.go",

View File

@ -43,6 +43,20 @@ func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error
return nil
}
// CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {
if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(secret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return fmt.Errorf("unable to create secret: %v", err)
}
if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(secret); err != nil {
return fmt.Errorf("unable to update secret: %v", err)
}
}
return nil
}
// CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error {
if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(sa); err != nil {

View File

@ -88,3 +88,22 @@ func WaitForStaticPodToDisappear(client clientset.Interface, timeout time.Durati
return false, nil
})
}
// TryRunCommand runs a function a maximum of failureThreshold times, and retries on error. If failureThreshold is hit, the last error is returned
func TryRunCommand(f func() error, failureThreshold uint8) error {
var numFailures uint8
return wait.PollImmediate(5*time.Second, 20*time.Minute, func() (bool, error) {
err := f()
if err != nil {
numFailures++
// If we've reached the maximum amount of failures, error out
if numFailures == failureThreshold {
return false, err
}
// Retry
return false, nil
}
// The last f() call was a success!
return true, nil
})
}

View File

@ -0,0 +1,96 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"strings"
)
// BuildArgumentListFromMap takes two string-string maps, one with the base arguments and one with optional override arguments
func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string {
var command []string
for k, v := range overrideArguments {
// values of "" are allowed as well
command = append(command, fmt.Sprintf("--%s=%s", k, v))
}
for k, v := range baseArguments {
if _, overrideExists := overrideArguments[k]; !overrideExists {
command = append(command, fmt.Sprintf("--%s=%s", k, v))
}
}
return command
}
// ParseArgumentListToMap parses a CLI argument list in the form "--foo=bar" to a string-string map
func ParseArgumentListToMap(arguments []string) map[string]string {
resultingMap := map[string]string{}
for i, arg := range arguments {
key, val, err := parseArgument(arg)
// Ignore the first argument if it doesn't satisfy the criteria; it's most often the binary name
// Warn in all other cases, but don't error out. This can happen only if the user has edited the argument list by hand, so they might know what they are doing
if err != nil {
if i != 0 {
fmt.Printf("[kubeadm] WARNING: The component argument %q could not be parsed correctly. The argument must be of the form %q. Skipping...", arg, "--")
}
continue
}
resultingMap[key] = val
}
return resultingMap
}
// ReplaceArgument takes a command list, converts it to a map for easier modification, runs the provided function that
// returns a new modified map, and then converts the map back to a command string slice
func ReplaceArgument(command []string, argMutateFunc func(map[string]string) map[string]string) []string {
argMap := ParseArgumentListToMap(command)
// Save the first command (the executable) if we're sure it's not an argument (i.e. no --)
var newCommand []string
if len(command) > 0 && !strings.HasPrefix(command[0], "--") {
newCommand = append(newCommand, command[0])
}
newArgMap := argMutateFunc(argMap)
newCommand = append(newCommand, BuildArgumentListFromMap(newArgMap, map[string]string{})...)
return newCommand
}
// parseArgument parses the argument "--foo=bar" to "foo" and "bar"
func parseArgument(arg string) (string, string, error) {
if !strings.HasPrefix(arg, "--") {
return "", "", fmt.Errorf("the argument should start with '--'")
}
if !strings.Contains(arg, "=") {
return "", "", fmt.Errorf("the argument should have a '=' between the flag and the value")
}
// Remove the starting --
arg = strings.TrimPrefix(arg, "--")
// Split the string on =. Return only two substrings, since we want only key/value, but the value can include '=' as well
keyvalSlice := strings.SplitN(arg, "=", 2)
// Make sure both a key and value is present
if len(keyvalSlice) != 2 {
return "", "", fmt.Errorf("the argument must have both a key and a value")
}
if len(keyvalSlice[0]) == 0 {
return "", "", fmt.Errorf("the argument must have a key")
}
return keyvalSlice[0], keyvalSlice[1], nil
}
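// Illustrative sketch (not part of the original change): expected parseArgument
// behavior, matching the unit tests in arguments_test.go.
//
//	parseArgument("--feature-gates=EnableFoo=true,EnableBar=false") // -> ("feature-gates", "EnableFoo=true,EnableBar=false", nil)
//	parseArgument("--a=")                                           // -> ("a", "", nil): empty values are allowed
//	parseArgument("a=b")                                            // -> ("", "", error): the leading "--" is required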

View File

@ -0,0 +1,341 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
"sort"
"testing"
)
func TestBuildArgumentListFromMap(t *testing.T) {
var tests = []struct {
base map[string]string
overrides map[string]string
expected []string
}{
{ // override an argument from the base
base: map[string]string{
"admission-control": "NamespaceLifecycle",
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
},
overrides: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
},
expected: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
},
{ // add an argument that is not in base
base: map[string]string{
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
},
overrides: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
},
expected: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
},
{ // allow empty strings in base
base: map[string]string{
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
"something-that-allows-empty-string": "",
},
overrides: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
},
expected: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--something-that-allows-empty-string=",
},
},
{ // allow empty strings in overrides
base: map[string]string{
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
"something-that-allows-empty-string": "foo",
},
overrides: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
"something-that-allows-empty-string": "",
},
expected: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--something-that-allows-empty-string=",
},
},
}
for _, rt := range tests {
actual := BuildArgumentListFromMap(rt.base, rt.overrides)
sort.Strings(actual)
sort.Strings(rt.expected)
if !reflect.DeepEqual(actual, rt.expected) {
t.Errorf("failed BuildArgumentListFromMap:\nexpected:\n%v\nsaw:\n%v", rt.expected, actual)
}
}
}
func TestParseArgumentListToMap(t *testing.T) {
var tests = []struct {
args []string
expectedMap map[string]string
}{
{
// normal case
args: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
expectedMap: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
},
},
{
// test that feature-gates is working
args: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--feature-gates=EnableFoo=true,EnableBar=false",
},
expectedMap: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
"feature-gates": "EnableFoo=true,EnableBar=false",
},
},
{
// test that a binary can be the first arg
args: []string{
"kube-apiserver",
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--feature-gates=EnableFoo=true,EnableBar=false",
},
expectedMap: map[string]string{
"admission-control": "NamespaceLifecycle,LimitRanger",
"insecure-bind-address": "127.0.0.1",
"allow-privileged": "true",
"feature-gates": "EnableFoo=true,EnableBar=false",
},
},
}
for _, rt := range tests {
actualMap := ParseArgumentListToMap(rt.args)
if !reflect.DeepEqual(actualMap, rt.expectedMap) {
t.Errorf("failed ParseArgumentListToMap:\nexpected:\n%v\nsaw:\n%v", rt.expectedMap, actualMap)
}
}
}
func TestReplaceArgument(t *testing.T) {
var tests = []struct {
args []string
mutateFunc func(map[string]string) map[string]string
expectedArgs []string
}{
{
// normal case
args: []string{
"kube-apiserver",
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
mutateFunc: func(argMap map[string]string) map[string]string {
argMap["admission-control"] = "NamespaceLifecycle,LimitRanger,ResourceQuota"
return argMap
},
expectedArgs: []string{
"kube-apiserver",
"--admission-control=NamespaceLifecycle,LimitRanger,ResourceQuota",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
},
{
// add a new argument that was not in the original list
args: []string{
"kube-apiserver",
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
mutateFunc: func(argMap map[string]string) map[string]string {
argMap["new-arg-here"] = "foo"
return argMap
},
expectedArgs: []string{
"kube-apiserver",
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--new-arg-here=foo",
},
},
}
for _, rt := range tests {
actualArgs := ReplaceArgument(rt.args, rt.mutateFunc)
sort.Strings(actualArgs)
sort.Strings(rt.expectedArgs)
if !reflect.DeepEqual(actualArgs, rt.expectedArgs) {
t.Errorf("failed ReplaceArgument:\nexpected:\n%v\nsaw:\n%v", rt.expectedArgs, actualArgs)
}
}
}
func TestRoundtrip(t *testing.T) {
var tests = []struct {
args []string
}{
{
// normal case
args: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
},
},
{
// test that feature-gates is working
args: []string{
"--admission-control=NamespaceLifecycle,LimitRanger",
"--insecure-bind-address=127.0.0.1",
"--allow-privileged=true",
"--feature-gates=EnableFoo=true,EnableBar=false",
},
},
}
for _, rt := range tests {
// These two functions should be inverses of each other: chain them and check that the original argument list comes back
actual := BuildArgumentListFromMap(ParseArgumentListToMap(rt.args), map[string]string{})
sort.Strings(actual)
sort.Strings(rt.args)
if !reflect.DeepEqual(actual, rt.args) {
t.Errorf("failed TestRoundtrip:\nexpected:\n%v\nsaw:\n%v", rt.args, actual)
}
}
}
func TestParseArgument(t *testing.T) {
var tests = []struct {
arg string
expectedKey string
expectedVal string
expectedErr bool
}{
{
// cannot be empty
arg: "",
expectedErr: true,
},
{
// must contain -- and =
arg: "a",
expectedErr: true,
},
{
// must contain -- and =
arg: "a-z",
expectedErr: true,
},
{
// must contain --
arg: "a=b",
expectedErr: true,
},
{
// must contain a key
arg: "--=b",
expectedErr: true,
},
{
// can contain key but no value
arg: "--a=",
expectedKey: "a",
expectedVal: "",
expectedErr: false,
},
{
// simple case
arg: "--a=b",
expectedKey: "a",
expectedVal: "b",
expectedErr: false,
},
{
// keys/values with '-' should be supported
arg: "--very-long-flag-name=some-value",
expectedKey: "very-long-flag-name",
expectedVal: "some-value",
expectedErr: false,
},
{
// numbers should be handled correctly
arg: "--some-number=0.2",
expectedKey: "some-number",
expectedVal: "0.2",
expectedErr: false,
},
{
// lists should be handled correctly
arg: "--admission-control=foo,bar,baz",
expectedKey: "admission-control",
expectedVal: "foo,bar,baz",
expectedErr: false,
},
{
// more than one '=' should be allowed
arg: "--feature-gates=EnableFoo=true,EnableBar=false",
expectedKey: "feature-gates",
expectedVal: "EnableFoo=true,EnableBar=false",
expectedErr: false,
},
}
for _, rt := range tests {
key, val, actual := parseArgument(rt.arg)
if (actual != nil) != rt.expectedErr {
t.Errorf("failed parseArgument:\nexpected error:\n%t\nsaw error:\n%v", rt.expectedErr, actual)
}
if (key != rt.expectedKey) || (val != rt.expectedVal) {
t.Errorf("failed parseArgument:\nexpected key: %s\nsaw key: %s\nexpected value: %s\nsaw value: %s", rt.expectedKey, key, rt.expectedVal, val)
}
}
}