kubeadm: first pass at self-hosted master components.

Devan Goodwin 2016-12-07 08:51:49 -04:00 committed by Paulo Pires
parent 093ceb9528
commit 750cdb5bc2
5 changed files with 539 additions and 49 deletions

View File

@ -21,10 +21,13 @@ import (
"io"
"io/ioutil"
"path"
"strconv"
"github.com/renstrom/dedent"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime"
netutil "k8s.io/apimachinery/pkg/util/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
@ -32,12 +35,8 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api"
@ -55,6 +54,9 @@ var (
kubeadm join --discovery %s
`)
deploymentStaticPod = "static-pods"
deploymentSelfHosted = "self-hosted"
deploymentTypes = []string{deploymentStaticPod, deploymentSelfHosted}
)
// NewCmdInit returns "kubeadm init" command.
@ -66,13 +68,14 @@ func NewCmdInit(out io.Writer) *cobra.Command {
var cfgPath string
var skipPreFlight bool
var deploymentType string // static pods, self-hosted, etc.
cmd := &cobra.Command{
Use: "init",
Short: "Run this in order to set up the Kubernetes master",
Run: func(cmd *cobra.Command, args []string) {
i, err := NewInit(cfgPath, &cfg, skipPreFlight)
i, err := NewInit(cfgPath, &cfg, skipPreFlight, deploymentType)
kubeadmutil.CheckErr(err)
kubeadmutil.CheckErr(i.Validate())
kubeadmutil.CheckErr(Validate(i.Cfg()))
kubeadmutil.CheckErr(i.Run(out))
},
}
@ -123,14 +126,15 @@ func NewCmdInit(out io.Writer) *cobra.Command {
"The discovery method kubeadm will use for connecting nodes to the master",
)
cmd.PersistentFlags().StringVar(
&deploymentType, "deployment", deploymentType,
fmt.Sprintf("specify a deployment type from %v", deploymentTypes),
)
return cmd
}
type Init struct {
cfg *kubeadmapi.MasterConfiguration
}
func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool) (*Init, error) {
func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool, deploymentType string) (Init, error) {
fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")
@ -169,15 +173,64 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
// Try to start the kubelet service in case it's inactive
preflight.TryStartKubelet()
return &Init{cfg: cfg}, nil
// validate version argument
ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
if err != nil {
if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
return nil, err
} else {
ver = kubeadmapiext.DefaultKubernetesFallbackVersion
}
}
cfg.KubernetesVersion = ver
fmt.Println("[init] Using Kubernetes version:", ver)
fmt.Println("[init] Using Authorization mode:", cfg.AuthorizationMode)
// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {
fmt.Println("WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.")
fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)")
}
var deploymentTypeValid bool
for _, supportedDT := range deploymentTypes {
if deploymentType == supportedDT {
deploymentTypeValid = true
}
}
if !deploymentTypeValid {
return nil, fmt.Errorf("%s is not a valid deployment type, you can use any of %v or leave unset to accept the default", deploymentType, deploymentTypes)
}
if deploymentType == deploymentSelfHosted {
fmt.Println("[init] Creating self-hosted Kubernetes deployment...")
return &SelfHostedInit{cfg: cfg}, nil
}
fmt.Println("[init] Creating static pod Kubernetes deployment...")
return &StaticPodInit{cfg: cfg}, nil
}
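As a usage note, the new flag is passed as, e.g., kubeadm init --deployment self-hosted; any value outside deploymentTypes is rejected by the membership check above. A minimal sketch of that check pulled out into a helper (the helper name is invented here; the commit keeps the loop inline in NewInit):

// isSupportedDeploymentType reports whether dt is one of the accepted
// --deployment values ("static-pods" or "self-hosted" in this commit).
// Illustrative helper only, not part of this change.
func isSupportedDeploymentType(dt string, supported []string) bool {
	for _, s := range supported {
		if dt == s {
			return true
		}
	}
	return false
}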
func (i *Init) Validate() error {
return validation.ValidateMasterConfiguration(i.cfg).ToAggregate()
func Validate(cfg *kubeadmapi.MasterConfiguration) error {
return validation.ValidateMasterConfiguration(cfg).ToAggregate()
}
// Init structs define implementations of the cluster setup for each supported
// deployment type.
type Init interface {
Cfg() *kubeadmapi.MasterConfiguration
Run(out io.Writer) error
}
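For illustration, any further deployment type would plug in by satisfying this two-method interface. The dry-run type below is hypothetical and not part of this commit:

// dryRunInit is a hypothetical Init implementation that only reports what
// it would do; it exists purely to illustrate the interface contract.
type dryRunInit struct {
	cfg *kubeadmapi.MasterConfiguration
}

func (d *dryRunInit) Cfg() *kubeadmapi.MasterConfiguration { return d.cfg }

func (d *dryRunInit) Run(out io.Writer) error {
	fmt.Fprintln(out, "[dry-run] would generate certs, kubeconfigs and a control plane")
	return nil
}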
type StaticPodInit struct {
cfg *kubeadmapi.MasterConfiguration
}
func (spi *StaticPodInit) Cfg() *kubeadmapi.MasterConfiguration {
return spi.cfg
}
// Run executes master node provisioning, including certificates, needed static pod manifests, etc.
func (i *Init) Run(out io.Writer) error {
func (i *StaticPodInit) Run(out io.Writer) error {
// PHASE 1: Generate certificates
caCert, err := certphase.CreatePKIAssets(i.cfg, kubeadmapi.GlobalEnvParams.HostPKIPath)
@ -254,6 +307,101 @@ func (i *Init) Run(out io.Writer) error {
return nil
}
// SelfHostedInit initializes a self-hosted cluster.
type SelfHostedInit struct {
cfg *kubeadmapi.MasterConfiguration
}
func (spi *SelfHostedInit) Cfg() *kubeadmapi.MasterConfiguration {
return spi.cfg
}
// Run executes master node provisioning, including certificates, needed pod manifests, etc.
func (i *SelfHostedInit) Run(out io.Writer) error {
// Validate token if any, otherwise generate
if i.cfg.Discovery.Token != nil {
if i.cfg.Discovery.Token.ID != "" && i.cfg.Discovery.Token.Secret != "" {
fmt.Printf("[token-discovery] A token has been provided, validating [%s]\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
if valid, err := kubeadmutil.ValidateToken(i.cfg.Discovery.Token); !valid {
return err
}
} else {
fmt.Println("[token-discovery] A token has not been provided, generating one")
if err := kubeadmutil.GenerateToken(i.cfg.Discovery.Token); err != nil {
return err
}
}
// Make sure there is at least one address
if len(i.cfg.Discovery.Token.Addresses) == 0 {
ip, err := netutil.ChooseHostInterface()
if err != nil {
return err
}
i.cfg.Discovery.Token.Addresses = []string{ip.String() + ":" + strconv.Itoa(kubeadmapiext.DefaultDiscoveryBindPort)}
}
if err := kubemaster.CreateTokenAuthFile(kubeadmutil.BearerToken(i.cfg.Discovery.Token)); err != nil {
return err
}
}
// PHASE 1: Generate certificates
caCert, err := certphase.CreatePKIAssets(i.cfg, kubeadmapi.GlobalEnvParams.HostPKIPath)
if err != nil {
return err
}
// PHASE 2: Generate kubeconfig files for the admin and the kubelet
// TODO this is not great, but there is only one address we can use here,
// so we'll pick the first one; there is not much chance of the slice being
// empty by the time this gets called.
masterEndpoint := fmt.Sprintf("https://%s:%d", i.cfg.API.AdvertiseAddresses[0], i.cfg.API.Port)
err = kubeconfigphase.CreateAdminAndKubeletKubeConfig(masterEndpoint, kubeadmapi.GlobalEnvParams.HostPKIPath, kubeadmapi.GlobalEnvParams.KubernetesDir)
if err != nil {
return err
}
// Phase 3: Bootstrap the control plane
if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil {
return err
}
client, err := kubemaster.CreateClientAndWaitForAPI(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfigphase.AdminKubeConfigFileName))
if err != nil {
return err
}
if err := kubemaster.UpdateMasterRoleLabelsAndTaints(client, false); err != nil {
return err
}
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[init] Creating self-hosted control plane...")
if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
return err
}
if i.cfg.Discovery.Token != nil {
fmt.Printf("[token-discovery] Using token: %s\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client, caCert); err != nil {
return err
}
if err := kubeadmutil.UpdateOrCreateToken(client, i.cfg.Discovery.Token, kubeadmutil.DefaultTokenDuration); err != nil {
return err
}
}
if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
return err
}
fmt.Fprintf(out, initDoneMsgf, generateJoinArgs(i.cfg))
return nil
}
// generateJoinArgs generates kubeadm join arguments
func generateJoinArgs(cfg *kubeadmapi.MasterConfiguration) string {
return discovery.NewDiscoveryValue(&cfg.Discovery).String()

View File

@ -15,6 +15,7 @@ go_library(
"apiclient.go",
"discovery.go",
"manifests.go",
"selfhosted.go",
"tokens.go",
],
tags = ["automanaged"],

View File

@ -65,38 +65,12 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
if err != nil {
return nil, err
}
fmt.Println("[apiclient] Created API client, waiting for the control plane to become ready")
start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: use /healthz API instead of this
cs, err := client.ComponentStatuses().List(v1.ListOptions{})
if err != nil {
if apierrs.IsForbidden(err) {
fmt.Println("[apiclient] Waiting for API server authorization")
}
return false, nil
}
// TODO(phase2) must revisit this when we implement HA
if len(cs.Items) < 3 {
fmt.Println("[apiclient] Not all control plane components are ready yet")
return false, nil
}
for _, item := range cs.Items {
for _, condition := range item.Conditions {
if condition.Type != v1.ComponentHealthy {
fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
return false, nil
}
}
}
fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
return true, nil
})
WaitForAPI(client)
fmt.Println("[apiclient] Waiting for at least one node to register and become ready")
start = time.Now()
start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
nodeList, err := client.Nodes().List(v1.ListOptions{})
if err != nil {
@ -128,6 +102,36 @@ func standardLabels(n string) map[string]string {
}
}
func WaitForAPI(client *clientset.Clientset) {
start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: use /healthz API instead of this
cs, err := client.ComponentStatuses().List(v1.ListOptions{})
if err != nil {
if apierrs.IsForbidden(err) {
fmt.Print("\r[apiclient] Waiting for the API server to create RBAC policies")
}
return false, nil
}
fmt.Println("\n[apiclient] RBAC policies created")
// TODO(phase2) must revisit this when we implement HA
if len(cs.Items) < 3 {
return false, nil
}
for _, item := range cs.Items {
for _, condition := range item.Conditions {
if condition.Type != v1.ComponentHealthy {
fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
return false, nil
}
}
}
fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
return true, nil
})
}
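The TODO above suggests polling /healthz rather than ComponentStatuses. A sketch of what that could look like with the generated clientset (an assumption about the discovery REST client in this client version, not something this commit does):

// waitForHealthz is a sketch of the /healthz-based readiness check hinted
// at by the TODO above; it is not part of this commit.
func waitForHealthz(client *clientset.Clientset) {
	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		body, err := client.Discovery().RESTClient().Get().AbsPath("/healthz").DoRaw()
		if err != nil {
			return false, nil
		}
		return string(body) == "ok", nil
	})
}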
func NewDaemonSet(daemonName string, podSpec v1.PodSpec) *extensions.DaemonSet {
l := standardLabels(daemonName)
return &extensions.DaemonSet{

View File

@ -98,10 +98,12 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
Env: getProxyEnvVars(),
}, volumes...),
kubeScheduler: componentPod(api.Container{
Name: kubeScheduler,
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getSchedulerCommand(cfg),
LivenessProbe: componentProbe(10251, "/healthz"),
Name: kubeScheduler,
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
// TODO: Using non-standard port here so self-hosted scheduler can come up:
// Use the regular port if this is not going to be a self-hosted deployment.
Command: getSchedulerCommand(cfg, 10260),
LivenessProbe: componentProbe(10260, "/healthz"),
Resources: componentResources("100m"),
Env: getProxyEnvVars(),
}),
@ -389,11 +391,12 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
return command
}
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, schedulerPort int) []string {
return append(getComponentBaseCommand(scheduler),
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
fmt.Sprintf("--port=%d", schedulerPort),
)
}
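To make the port split explicit: the static-pod manifest now binds and probes 10260 so that a self-hosted scheduler can later take the default port on the same host network, while the self-hosted Deployment (getSchedulerDeployment in selfhosted.go) keeps 10251, matching its own liveness probe. A sketch of the two call sites side by side (the helper below is illustrative, not in this commit):

// schedulerCommands shows the two call sites and why the ports differ.
func schedulerCommands(cfg *kubeadmapi.MasterConfiguration) (staticPod, selfHosted []string) {
	staticPod = getSchedulerCommand(cfg, 10260)  // probed by componentProbe(10260, "/healthz")
	selfHosted = getSchedulerCommand(cfg, 10251) // probed by componentProbe(10251, "/healthz")
	return staticPod, selfHosted
}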

View File

@ -0,0 +1,334 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"encoding/json"
"fmt"
"os"
"path"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
ext "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)
func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
volumes := []v1.Volume{k8sVolume(cfg)}
volumeMounts := []v1.VolumeMount{k8sVolumeMount()}
if isCertsVolumeMountNeeded() {
volumes = append(volumes, certsVolume(cfg))
volumeMounts = append(volumeMounts, certsVolumeMount())
}
if isPkiVolumeMountNeeded() {
volumes = append(volumes, pkiVolume(cfg))
volumeMounts = append(volumeMounts, pkiVolumeMount())
}
if err := LaunchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {
return err
}
if err := LaunchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {
return err
}
if err := LaunchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {
return err
}
return nil
}
func LaunchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
start := time.Now()
apiServer := getAPIServerDS(cfg, volumes, volumeMounts)
if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(&apiServer); err != nil {
return fmt.Errorf("failed to create self-hosted %q daemon set [%v]", kubeAPIServer, err)
}
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: This might be pointless; checking the pods is probably enough.
// It does, however, give us a count of how many there should be, which may
// be useful with HA.
apiDS, err := client.DaemonSets(api.NamespaceSystem).Get(kubeAPIServer,
metav1.GetOptions{})
if err != nil {
fmt.Println("[debug] error getting apiserver DaemonSet:", err)
return false, nil
}
fmt.Printf("[debug] %s DaemonSet current=%d, desired=%d\n",
kubeAPIServer,
apiDS.Status.CurrentNumberScheduled,
apiDS.Status.DesiredNumberScheduled)
if apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled {
return false, nil
}
return true, nil
})
waitForPodsWithLabel(client, kubeAPIServer)
apiServerStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
"manifests", kubeAPIServer+".json")
if err := os.Remove(apiServerStaticManifestPath); err != nil {
return fmt.Errorf("unable to delete temporary API server manifest [%v]", err)
}
// Wait until kubernetes detects the static pod removal and our newly created
// API server comes online:
// TODO: Should we verify that either the API is down, or the static apiserver pod is gone before
// waiting?
WaitForAPI(client)
fmt.Printf("[debug] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds())
return nil
}
func LaunchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
start := time.Now()
ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&ctrlMgr); err != nil {
return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err)
}
waitForPodsWithLabel(client, kubeControllerManager)
ctrlMgrStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
"manifests", kubeControllerManager+".json")
if err := os.Remove(ctrlMgrStaticManifestPath); err != nil {
return fmt.Errorf("unable to delete temporary controller manager manifest [%v]", err)
}
fmt.Printf("[debug] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds())
return nil
}
func LaunchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
start := time.Now()
scheduler := getSchedulerDeployment(cfg)
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&scheduler); err != nil {
return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err)
}
waitForPodsWithLabel(client, kubeScheduler)
schedulerStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
"manifests", kubeScheduler+".json")
if err := os.Remove(schedulerStaticManifestPath); err != nil {
return fmt.Errorf("unable to delete temporary scheduler manifest [%v]", err)
}
fmt.Printf("[debug] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds())
return nil
}
// waitForPodsWithLabel will look up pods with the given label and wait until they are all
// reporting status as running.
func waitForPodsWithLabel(client *clientset.Clientset, appLabel string) {
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: Do we need a stronger label link than this?
listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
apiPods, err := client.Pods(api.NamespaceSystem).List(listOpts)
if err != nil {
fmt.Printf("[debug] error getting %s pods [%v]\n", appLabel, err)
return false, nil
}
fmt.Printf("[debug] Found %d %s pods\n", len(apiPods.Items), appLabel)
// TODO: HA
if int32(len(apiPods.Items)) != 1 {
return false, nil
}
for _, pod := range apiPods.Items {
fmt.Printf("[debug] Pod %s status: %s\n", pod.Name, pod.Status.Phase)
if pod.Status.Phase != "Running" {
return false, nil
}
}
return true, nil
})
return
}
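Note that wait.PollInfinite never gives up. A bounded variant using wait.Poll would look roughly like this (a sketch with an arbitrary timeout, not what this commit does):

// waitForPodsWithLabelTimeout is a sketch of a bounded variant of the
// function above; the 5-minute timeout is an illustrative assumption.
func waitForPodsWithLabelTimeout(client *clientset.Clientset, appLabel string) error {
	return wait.Poll(apiCallRetryInterval, 5*time.Minute, func() (bool, error) {
		listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
		pods, err := client.Pods(api.NamespaceSystem).List(listOpts)
		if err != nil {
			return false, nil
		}
		for _, pod := range pods.Items {
			if pod.Status.Phase != v1.PodRunning {
				return false, nil
			}
		}
		return len(pods.Items) > 0, nil
	})
}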
// Sourced from bootkube's templates.go.
func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration,
volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {
ds := ext.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "extensions/v1beta1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: kubeAPIServer,
Namespace: "kube-system",
//Labels: map[string]string{"k8s-app": "kube-apiserver"},
},
Spec: ext.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
// TODO: taken from bootkube, appears to be essential, without this
// we don't get an apiserver pod...
"k8s-app": kubeAPIServer,
"component": kubeAPIServer,
"tier": "control-plane",
},
},
Spec: v1.PodSpec{
// TODO: Make sure masters get this label
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
HostNetwork: true,
Volumes: volumes,
Containers: []v1.Container{
{
Name: kubeAPIServer,
Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getAPIServerCommand(cfg),
Env: getProxyEnvVars(),
VolumeMounts: volumeMounts,
LivenessProbe: componentProbe(8080, "/healthz"),
Resources: componentResources("250m"),
},
},
},
},
},
}
return ds
}
func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration,
volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {
cmDep := ext.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: kubeControllerManager,
Namespace: "kube-system",
},
Spec: ext.DeploymentSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
// TODO: taken from bootkube, appears to be essential
"k8s-app": kubeControllerManager,
"component": kubeControllerManager,
"tier": "control-plane",
},
Annotations: map[string]string{
v1.TolerationsAnnotationKey: getMasterToleration(),
},
},
Spec: v1.PodSpec{
// TODO: Make sure masters get this label
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
HostNetwork: true,
Volumes: volumes,
Containers: []v1.Container{
{
Name: kubeControllerManager,
Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getControllerManagerCommand(cfg),
VolumeMounts: volumeMounts,
LivenessProbe: componentProbe(10252, "/healthz"),
Resources: componentResources("200m"),
Env: getProxyEnvVars(),
},
},
},
},
},
}
return cmDep
}
func getMasterToleration() string {
// Tolerate the master taint we add to our master nodes, as this can and should
// run there.
// TODO: Duplicated above
masterToleration, _ := json.Marshal([]v1.Toleration{{
Key: "dedicated",
Value: "master",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
}})
return string(masterToleration)
}
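// For clarity, the annotation value produced by getMasterToleration is the
// JSON-encoded toleration list, approximately (exact field order follows the
// v1.Toleration struct tags and may differ slightly):
//
//   [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]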
func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {
cmDep := ext.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: kubeScheduler,
Namespace: "kube-system",
},
Spec: ext.DeploymentSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"k8s-app": kubeScheduler,
"component": kubeScheduler,
"tier": "control-plane",
},
Annotations: map[string]string{
v1.TolerationsAnnotationKey: getMasterToleration(),
},
},
Spec: v1.PodSpec{
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
HostNetwork: true,
Containers: []v1.Container{
{
Name: kubeScheduler,
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getSchedulerCommand(cfg, 10251),
LivenessProbe: componentProbe(10251, "/healthz"),
Resources: componentResources("100m"),
Env: getProxyEnvVars(),
},
},
},
},
},
}
return cmDep
}