diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go
index 124f15e17e3..f08c47b59a2 100644
--- a/cmd/kubeadm/app/cmd/init.go
+++ b/cmd/kubeadm/app/cmd/init.go
@@ -25,6 +25,7 @@ import (
     "github.com/renstrom/dedent"
     "github.com/spf13/cobra"
 
+    "k8s.io/apimachinery/pkg/runtime"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
     "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
@@ -32,12 +33,8 @@ import (
     "k8s.io/kubernetes/cmd/kubeadm/app/discovery"
     kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
     "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
-    certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
     kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
-
-    "k8s.io/apimachinery/pkg/runtime"
-
     "k8s.io/kubernetes/cmd/kubeadm/app/preflight"
     kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
     "k8s.io/kubernetes/pkg/api"
@@ -66,11 +63,12 @@ func NewCmdInit(out io.Writer) *cobra.Command {
     var cfgPath string
     var skipPreFlight bool
+    var selfHosted bool
     cmd := &cobra.Command{
         Use:   "init",
         Short: "Run this in order to set up the Kubernetes master",
         Run: func(cmd *cobra.Command, args []string) {
-            i, err := NewInit(cfgPath, &cfg, skipPreFlight)
+            i, err := NewInit(cfgPath, &cfg, skipPreFlight, selfHosted)
             kubeadmutil.CheckErr(err)
             kubeadmutil.CheckErr(i.Validate())
             kubeadmutil.CheckErr(i.Run(out))
@@ -115,7 +113,7 @@
     cmd.PersistentFlags().BoolVar(
         &skipPreFlight, "skip-preflight-checks", skipPreFlight,
-        "skip preflight checks normally run before modifying the system",
+        "Skip preflight checks normally run before modifying the system",
     )
 
     cmd.PersistentFlags().Var(
@@ -123,14 +121,15 @@
         "The discovery method kubeadm will use for connecting nodes to the master",
     )
 
+    cmd.PersistentFlags().BoolVar(
+        &selfHosted, "self-hosted", selfHosted,
+        "Enable self-hosted control plane",
+    )
+
     return cmd
 }
 
-type Init struct {
-    cfg *kubeadmapi.MasterConfiguration
-}
-
-func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool) (*Init, error) {
+func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool, selfHosted bool) (*Init, error) {
 
     fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")
@@ -169,9 +168,21 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
     // Try to start the kubelet service in case it's inactive
     preflight.TryStartKubelet()
 
-    return &Init{cfg: cfg}, nil
+    // Warn about the limitations of the current cloud provider solution.
+    if cfg.CloudProvider != "" {
+        fmt.Println("WARNING: For cloud provider integrations to work, --cloud-provider must be set for all kubelets in the cluster.")
+        fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)")
+    }
+
+    return &Init{cfg: cfg, selfHosted: selfHosted}, nil
 }
 
+type Init struct {
+    cfg        *kubeadmapi.MasterConfiguration
+    selfHosted bool
+}
+
+// Validate validates the configuration passed to "kubeadm init"
 func (i *Init) Validate() error {
     return validation.ValidateMasterConfiguration(i.cfg).ToAggregate()
 }
@@ -246,6 +257,16 @@ func (i *Init) Run(out io.Writer) error {
         }
     }
 
+    // Is the deployment type self-hosted?
+    if i.selfHosted {
+        // The temporary control plane is up; now create the self-hosted control
+        // plane components and remove the static manifests:
+        fmt.Println("[init] Creating self-hosted control plane...")
+        if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
+            return err
+        }
+    }
+
     if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
         return err
     }
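Note: the new flag is wired from NewCmdInit through NewInit into the Init struct. A minimal test sketch for the flag registration; the test name and the use of ioutil.Discard are illustrative, not part of this change, and the usual "io/ioutil" and "testing" imports are assumed:

func TestNewCmdInitRegistersSelfHostedFlag(t *testing.T) {
	// NewCmdInit only needs an io.Writer, so its output is discarded here.
	cmd := NewCmdInit(ioutil.Discard)
	if cmd.PersistentFlags().Lookup("self-hosted") == nil {
		t.Error("expected 'kubeadm init' to register a --self-hosted flag")
	}
}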
diff --git a/cmd/kubeadm/app/master/BUILD b/cmd/kubeadm/app/master/BUILD
index d2ffeb85f39..af147376b46 100644
--- a/cmd/kubeadm/app/master/BUILD
+++ b/cmd/kubeadm/app/master/BUILD
@@ -15,6 +15,7 @@ go_library(
         "apiclient.go",
         "discovery.go",
         "manifests.go",
+        "selfhosted.go",
         "tokens.go",
     ],
     tags = ["automanaged"],
diff --git a/cmd/kubeadm/app/master/apiclient.go b/cmd/kubeadm/app/master/apiclient.go
index 4f1a67f2088..b72f948d85c 100644
--- a/cmd/kubeadm/app/master/apiclient.go
+++ b/cmd/kubeadm/app/master/apiclient.go
@@ -65,38 +65,12 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
     if err != nil {
         return nil, err
     }
+
     fmt.Println("[apiclient] Created API client, waiting for the control plane to become ready")
-
-    start := time.Now()
-    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
-        // TODO: use /healthz API instead of this
-        cs, err := client.ComponentStatuses().List(v1.ListOptions{})
-        if err != nil {
-            if apierrs.IsForbidden(err) {
-                fmt.Println("[apiclient] Waiting for API server authorization")
-            }
-            return false, nil
-        }
-        // TODO(phase2) must revisit this when we implement HA
-        if len(cs.Items) < 3 {
-            fmt.Println("[apiclient] Not all control plane components are ready yet")
-            return false, nil
-        }
-        for _, item := range cs.Items {
-            for _, condition := range item.Conditions {
-                if condition.Type != v1.ComponentHealthy {
-                    fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
-                    return false, nil
-                }
-            }
-        }
-
-        fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
-        return true, nil
-    })
+    WaitForAPI(client)
 
     fmt.Println("[apiclient] Waiting for at least one node to register and become ready")
-    start = time.Now()
+    start := time.Now()
     wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
         nodeList, err := client.Nodes().List(v1.ListOptions{})
         if err != nil {
@@ -128,6 +102,36 @@ func standardLabels(n string) map[string]string {
     }
 }
 
+func WaitForAPI(client *clientset.Clientset) {
+    start := time.Now()
+    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
+        // TODO: use /healthz API instead of this
+        cs, err := client.ComponentStatuses().List(v1.ListOptions{})
+        if err != nil {
+            if apierrs.IsForbidden(err) {
+                fmt.Println("[apiclient] Waiting for API server authorization")
+            }
+            return false, nil
+        }
+
+        // TODO(phase2) must revisit this when we implement HA
+        if len(cs.Items) < 3 {
+            return false, nil
+        }
+        for _, item := range cs.Items {
+            for _, condition := range item.Conditions {
+                if condition.Type != v1.ComponentHealthy {
+                    fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
+                    return false, nil
+                }
+            }
+        }
+
+        fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
+        return true, nil
+    })
+}
+
 func NewDaemonSet(daemonName string, podSpec v1.PodSpec) *extensions.DaemonSet {
     l := standardLabels(daemonName)
     return &extensions.DaemonSet{
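Note: WaitForAPI still polls ComponentStatuses; the TODO suggests moving to /healthz. A sketch of what that could look like, assuming the generated clientset of this vintage exposes Discovery().RESTClient() and a context-free DoRaw(); both are assumptions, not part of this change:

// healthzReady reports whether the API server's /healthz endpoint returns "ok".
func healthzReady(client *clientset.Clientset) bool {
	body, err := client.Discovery().RESTClient().Get().AbsPath("/healthz").DoRaw()
	return err == nil && string(body) == "ok"
}

Swapping this into the poll loop would avoid hard-coding the expectation of exactly three ComponentStatuses, which the TODO(phase2) comment already flags as a problem for HA.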
diff --git a/cmd/kubeadm/app/master/manifests.go b/cmd/kubeadm/app/master/manifests.go
index 069cb0f669c..65e9dbd1409 100644
--- a/cmd/kubeadm/app/master/manifests.go
+++ b/cmd/kubeadm/app/master/manifests.go
@@ -82,7 +82,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeAPIServer: componentPod(api.Container{
             Name:          kubeAPIServer,
             Image:         images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-            Command:       getAPIServerCommand(cfg),
+            Command:       getAPIServerCommand(cfg, false),
             VolumeMounts:  volumeMounts,
             LivenessProbe: componentProbe(8080, "/healthz"),
             Resources:     componentResources("250m"),
@@ -91,7 +91,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeControllerManager: componentPod(api.Container{
             Name:          kubeControllerManager,
             Image:         images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-            Command:       getControllerManagerCommand(cfg),
+            Command:       getControllerManagerCommand(cfg, false),
             VolumeMounts:  volumeMounts,
             LivenessProbe: componentProbe(10252, "/healthz"),
             Resources:     componentResources("200m"),
@@ -100,7 +100,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeScheduler: componentPod(api.Container{
             Name:          kubeScheduler,
             Image:         images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-            Command:       getSchedulerCommand(cfg),
+            Command:       getSchedulerCommand(cfg, false),
             LivenessProbe: componentProbe(10251, "/healthz"),
             Resources:     componentResources("100m"),
             Env:           getProxyEnvVars(),
@@ -217,6 +217,23 @@ func pkiVolumeMount() api.VolumeMount {
     }
 }
 
+func flockVolume() api.Volume {
+    return api.Volume{
+        Name: "var-lock",
+        VolumeSource: api.VolumeSource{
+            HostPath: &api.HostPathVolumeSource{Path: "/var/lock"},
+        },
+    }
+}
+
+func flockVolumeMount() api.VolumeMount {
+    return api.VolumeMount{
+        Name:      "var-lock",
+        MountPath: "/var/lock",
+        ReadOnly:  false,
+    }
+}
+
 func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
     return api.Volume{
         Name: "k8s",
@@ -284,8 +301,15 @@ func getComponentBaseCommand(component string) []string {
     return []string{"kube-" + component}
 }
 
-func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
-    command := append(getComponentBaseCommand(apiServer),
+func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+    var command []string
+
+    // self-hosted apiserver needs to wait on a lock
+    if selfHosted {
+        command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
+    }
+
+    command = append(getComponentBaseCommand(apiServer),
         "--insecure-bind-address=127.0.0.1",
         "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
         "--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
@@ -310,7 +334,11 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 
     // Use first address we are given
     if len(cfg.API.AdvertiseAddresses) > 0 {
-        command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
+        if selfHosted {
+            command = append(command, "--advertise-address=$(POD_IP)")
+        } else {
+            command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
+        }
     }
 
     if len(cfg.KubernetesVersion) != 0 {
@@ -359,8 +387,15 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 
     return command
 }
 
-func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
-    command := append(getComponentBaseCommand(controllerManager),
+func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+    var command []string
+
+    // self-hosted controller-manager needs to wait on a lock
+    if selfHosted {
+        command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"}
+    }
+
+    command = append(getComponentBaseCommand(controllerManager),
         "--address=127.0.0.1",
         "--leader-elect",
         "--master=127.0.0.1:8080",
@@ -386,15 +421,25 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
     if cfg.Networking.PodSubnet != "" {
         command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
     }
+
     return command
 }
 
-func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
-    return append(getComponentBaseCommand(scheduler),
+func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+    var command []string
+
+    // self-hosted scheduler needs to wait on a lock
+    if selfHosted {
+        command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/scheduler.lock"}
+    }
+
+    command = append(getComponentBaseCommand(scheduler),
         "--address=127.0.0.1",
         "--leader-elect",
         "--master=127.0.0.1:8080",
     )
+
+    return command
 }
 
 func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) []string {
@@ -418,3 +463,16 @@ func getProxyEnvVars() []api.EnvVar {
     }
     return envs
 }
+
+func getSelfHostedAPIServerEnv() []api.EnvVar {
+    podIPEnvVar := api.EnvVar{
+        Name: "POD_IP",
+        ValueFrom: &api.EnvVarSource{
+            FieldRef: &api.ObjectFieldSelector{
+                FieldPath: "status.podIP",
+            },
+        },
+    }
+
+    return append(getProxyEnvVars(), podIPEnvVar)
+}
diff --git a/cmd/kubeadm/app/master/manifests_test.go b/cmd/kubeadm/app/master/manifests_test.go
index 214bf0424c3..11d028d6678 100644
--- a/cmd/kubeadm/app/master/manifests_test.go
+++ b/cmd/kubeadm/app/master/manifests_test.go
@@ -454,7 +454,7 @@ func TestGetAPIServerCommand(t *testing.T) {
     }
 
     for _, rt := range tests {
-        actual := getAPIServerCommand(rt.cfg)
+        actual := getAPIServerCommand(rt.cfg, false)
         for i := range actual {
             if actual[i] != rt.expected[i] {
                 t.Errorf(
@@ -523,7 +523,7 @@ func TestGetControllerManagerCommand(t *testing.T) {
     }
 
     for _, rt := range tests {
-        actual := getControllerManagerCommand(rt.cfg)
+        actual := getControllerManagerCommand(rt.cfg, false)
         for i := range actual {
             if actual[i] != rt.expected[i] {
                 t.Errorf(
@@ -553,7 +553,7 @@ func TestGetSchedulerCommand(t *testing.T) {
     }
 
     for _, rt := range tests {
-        actual := getSchedulerCommand(rt.cfg)
+        actual := getSchedulerCommand(rt.cfg, false)
         for i := range actual {
             if actual[i] != rt.expected[i] {
                 t.Errorf(
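Note: when selfHosted is true, each component command is prefixed with /usr/bin/flock --exclusive --timeout=30 on a per-component file under the shared /var/lock hostPath, so two flock-wrapped copies of the same component cannot start on one node at the same time. A test sketch in the style of manifests_test.go that checks only the flock prefix; the test name is illustrative and a zero-value MasterConfiguration is assumed to be enough to exercise the prefix:

func TestGetAPIServerCommandSelfHosted(t *testing.T) {
	cfg := &kubeadmapi.MasterConfiguration{}
	actual := getAPIServerCommand(cfg, true)
	expectedPrefix := []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
	for i, arg := range expectedPrefix {
		if actual[i] != arg {
			t.Errorf("expected %q at position %d, got %q", arg, i, actual[i])
		}
	}
}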
diff --git a/cmd/kubeadm/app/master/selfhosted.go b/cmd/kubeadm/app/master/selfhosted.go
new file mode 100644
index 00000000000..b0c54e48c36
--- /dev/null
+++ b/cmd/kubeadm/app/master/selfhosted.go
@@ -0,0 +1,331 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package master
+
+import (
+    "encoding/json"
+    "fmt"
+    "os"
+    "path"
+    "time"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/wait"
+    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
+    "k8s.io/kubernetes/cmd/kubeadm/app/images"
+    "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/v1"
+    ext "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+)
+
+func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
+    volumes := []v1.Volume{k8sVolume(cfg)}
+    volumeMounts := []v1.VolumeMount{k8sVolumeMount()}
+    if isCertsVolumeMountNeeded() {
+        volumes = append(volumes, certsVolume(cfg))
+        volumeMounts = append(volumeMounts, certsVolumeMount())
+    }
+
+    if isPkiVolumeMountNeeded() {
+        volumes = append(volumes, pkiVolume(cfg))
+        volumeMounts = append(volumeMounts, pkiVolumeMount())
+    }
+
+    // Need lock for self-hosted
+    volumes = append(volumes, flockVolume())
+    volumeMounts = append(volumeMounts, flockVolumeMount())
+
+    if err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {
+        return err
+    }
+
+    if err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {
+        return err
+    }
+
+    if err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+    start := time.Now()
+
+    apiServer := getAPIServerDS(cfg, volumes, volumeMounts)
+    if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(&apiServer); err != nil {
+        return fmt.Errorf("failed to create self-hosted %q daemon set [%v]", kubeAPIServer, err)
+    }
+
+    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
+        // TODO: This might be pointless, checking the pods is probably enough.
+        // It does however get us a count of how many there should be which may be useful
+        // with HA.
+        apiDS, err := client.DaemonSets(api.NamespaceSystem).Get("self-hosted-"+kubeAPIServer,
+            metav1.GetOptions{})
+        if err != nil {
+            fmt.Println("[self-hosted] error getting apiserver DaemonSet:", err)
+            return false, nil
+        }
+        fmt.Printf("[self-hosted] %s DaemonSet current=%d, desired=%d\n",
+            kubeAPIServer,
+            apiDS.Status.CurrentNumberScheduled,
+            apiDS.Status.DesiredNumberScheduled)
+
+        if apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled {
+            return false, nil
+        }
+
+        return true, nil
+    })
+
+    // Wait for the self-hosted API server to take ownership
+    waitForPodsWithLabel(client, "self-hosted-"+kubeAPIServer, true)
+
+    // Remove the temporary API server
+    apiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer)
+    if err := os.RemoveAll(apiServerStaticManifestPath); err != nil {
+        return fmt.Errorf("unable to delete temporary API server manifest [%v]", err)
+    }
+
+    WaitForAPI(client)
+
+    fmt.Printf("[self-hosted] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds())
+    return nil
+}
+
+func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+    start := time.Now()
+
+    ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)
+    if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&ctrlMgr); err != nil {
+        return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err)
+    }
+
+    waitForPodsWithLabel(client, "self-hosted-"+kubeControllerManager, false)
+
+    ctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager)
+    if err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil {
+        return fmt.Errorf("unable to delete temporary controller manager manifest [%v]", err)
+    }
+
+    fmt.Printf("[self-hosted] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds())
+    return nil
+}
+
+func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+    start := time.Now()
+    scheduler := getSchedulerDeployment(cfg)
+    if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&scheduler); err != nil {
+        return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err)
+    }
+
+    waitForPodsWithLabel(client, "self-hosted-"+kubeScheduler, false)
+
+    schedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler)
+    if err := os.RemoveAll(schedulerStaticManifestPath); err != nil {
+        return fmt.Errorf("unable to delete temporary scheduler manifest [%v]", err)
+    }
+
+    fmt.Printf("[self-hosted] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds())
+    return nil
+}
+
+// waitForPodsWithLabel will look up pods with the given label and wait until they are all
+// reporting status as running.
+func waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) {
+    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
+        // TODO: Do we need a stronger label link than this?
+        listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
+        apiPods, err := client.Pods(api.NamespaceSystem).List(listOpts)
+        if err != nil {
+            fmt.Printf("[self-hosted] error getting %s pods [%v]\n", appLabel, err)
+            return false, nil
+        }
+        fmt.Printf("[self-hosted] Found %d %s pods\n", len(apiPods.Items), appLabel)
+
+        // TODO: HA
+        if int32(len(apiPods.Items)) != 1 {
+            return false, nil
+        }
+        for _, pod := range apiPods.Items {
+            fmt.Printf("[self-hosted] Pod %s status: %s\n", pod.Name, pod.Status.Phase)
+            if mustBeRunning && pod.Status.Phase != "Running" {
+                return false, nil
+            }
+        }
+
+        return true, nil
+    })
+}
+
+// Sourced from bootkube's templates.go
+func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {
+    ds := ext.DaemonSet{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "extensions/v1beta1",
+            Kind:       "DaemonSet",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "self-hosted-" + kubeAPIServer,
+            Namespace: "kube-system",
+            Labels:    map[string]string{"k8s-app": "self-hosted-" + kubeAPIServer},
+        },
+        Spec: ext.DaemonSetSpec{
+            Template: v1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        "k8s-app":   "self-hosted-" + kubeAPIServer,
+                        "component": kubeAPIServer,
+                        "tier":      "control-plane",
+                    },
+                    Annotations: map[string]string{
+                        v1.TolerationsAnnotationKey: getMasterToleration(),
+                    },
+                },
+                Spec: v1.PodSpec{
+                    NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
+                    HostNetwork:  true,
+                    Volumes:      volumes,
+                    Containers: []v1.Container{
+                        {
+                            Name:          "self-hosted-" + kubeAPIServer,
+                            Image:         images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
+                            Command:       getAPIServerCommand(cfg, true),
+                            Env:           getSelfHostedAPIServerEnv(),
+                            VolumeMounts:  volumeMounts,
+                            LivenessProbe: componentProbe(8080, "/healthz"),
+                            Resources:     componentResources("250m"),
+                        },
+                    },
+                },
+            },
+        },
+    }
+    return ds
+}
+
+func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {
+    d := ext.Deployment{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "extensions/v1beta1",
+            Kind:       "Deployment",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "self-hosted-" + kubeControllerManager,
+            Namespace: "kube-system",
+            Labels:    map[string]string{"k8s-app": "self-hosted-" + kubeControllerManager},
+        },
+        Spec: ext.DeploymentSpec{
+            // TODO bootkube uses 2 replicas
+            Template: v1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        "k8s-app":   "self-hosted-" + kubeControllerManager,
+                        "component": kubeControllerManager,
+                        "tier":      "control-plane",
+                    },
+                    Annotations: map[string]string{
+                        v1.TolerationsAnnotationKey: getMasterToleration(),
+                    },
+                },
+                Spec: v1.PodSpec{
+                    NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
+                    HostNetwork:  true,
+                    Volumes:      volumes,
+                    Containers: []v1.Container{
+                        {
+                            Name:          "self-hosted-" + kubeControllerManager,
+                            Image:         images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
+                            Command:       getControllerManagerCommand(cfg, true),
+                            VolumeMounts:  volumeMounts,
+                            LivenessProbe: componentProbe(10252, "/healthz"),
+                            Resources:     componentResources("200m"),
+                            Env:           getProxyEnvVars(),
+                        },
+                    },
+                    DNSPolicy: v1.DNSDefault,
+                },
+            },
+        },
+    }
+    return d
+}
+
+func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {
+    d := ext.Deployment{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: "extensions/v1beta1",
+            Kind:       "Deployment",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "self-hosted-" + kubeScheduler,
+            Namespace: "kube-system",
+            Labels:    map[string]string{"k8s-app": "self-hosted-" + kubeScheduler},
+        },
+        Spec: ext.DeploymentSpec{
+            // TODO bootkube uses 2 replicas
+            Template: v1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        "k8s-app":   "self-hosted-" + kubeScheduler,
+                        "component": kubeScheduler,
+                        "tier":      "control-plane",
+                    },
+                    Annotations: map[string]string{
+                        v1.TolerationsAnnotationKey: getMasterToleration(),
+                    },
+                },
+                Spec: v1.PodSpec{
+                    NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
+                    HostNetwork:  true,
+                    Containers: []v1.Container{
+                        {
+                            Name:          "self-hosted-" + kubeScheduler,
+                            Image:         images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
+                            Command:       getSchedulerCommand(cfg, true),
+                            LivenessProbe: componentProbe(10251, "/healthz"),
+                            Resources:     componentResources("100m"),
+                            Env:           getProxyEnvVars(),
+                        },
+                    },
+                },
+            },
+        },
+    }
+    return d
+}
+
+func buildStaticManifestFilepath(name string) string {
+    return path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests", name+".json")
+}
+
+func getMasterToleration() string {
+    // Tolerate the master taint we add to our master nodes, as this can and should
+    // run there.
+    // TODO: Duplicated above
+    masterToleration, _ := json.Marshal([]v1.Toleration{{
+        Key:      "dedicated",
+        Value:    "master",
+        Operator: v1.TolerationOpEqual,
+        Effect:   v1.TaintEffectNoSchedule,
+    }})
+    return string(masterToleration)
+}
diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index 77540f1f81c..3ac466bab4f 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -543,6 +543,7 @@ seccomp-profile-root
 secondary-node-eviction-rate
 secret-name
 secure-port
+self-hosted
 serialize-image-pulls
 server-start-timeout
 service-account-key-file
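Note: the launch helpers poll with wait.PollInfinite, so a component that never becomes healthy blocks kubeadm init forever. A bounded variant would be a small change; a sketch of waitForPodsWithLabel with a timeout, where the five-minute budget and the function name are illustrative assumptions:

const selfHostedWaitTimeout = 5 * time.Minute

// waitForPodsWithLabelBounded behaves like waitForPodsWithLabel but gives up
// after selfHostedWaitTimeout and reports the failure to the caller.
func waitForPodsWithLabelBounded(client *clientset.Clientset, appLabel string, mustBeRunning bool) error {
	return wait.Poll(apiCallRetryInterval, selfHostedWaitTimeout, func() (bool, error) {
		listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
		pods, err := client.Pods(api.NamespaceSystem).List(listOpts)
		if err != nil {
			// Transient API errors are retried until the timeout expires.
			return false, nil
		}
		// A single replica is expected for now, mirroring waitForPodsWithLabel.
		if len(pods.Items) != 1 {
			return false, nil
		}
		for _, pod := range pods.Items {
			if mustBeRunning && pod.Status.Phase != v1.PodRunning {
				return false, nil
			}
		}
		return true, nil
	})
}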