kubeadm: add self-hosted as optional deployment type.
commit c80c0275da
parent 750cdb5bc2
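In outline, this change makes self-hosting an optional way to deploy the control plane: kubeadm still writes the static pod manifests first, then, in the self-hosted path, redeploys the apiserver as a DaemonSet and the controller-manager and scheduler as Deployments under "self-hosted-" names, and finally deletes each temporary static manifest. A new selfHosted flag threads through the command builders so the self-hosted copies run under /usr/bin/flock on a hostPath lock in /var/lock and the apiserver advertises its pod IP instead of a fixed address.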
@@ -173,18 +173,6 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
 	// Try to start the kubelet service in case it's inactive
 	preflight.TryStartKubelet()
 
-	// validate version argument
-	ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
-	if err != nil {
-		if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
-			return nil, err
-		} else {
-			ver = kubeadmapiext.DefaultKubernetesFallbackVersion
-		}
-	}
-	cfg.KubernetesVersion = ver
-	fmt.Println("[init] Using Kubernetes version:", ver)
-	fmt.Println("[init] Using Authorization mode:", cfg.AuthorizationMode)
 
 	// Warn about the limitations with the current cloudprovider solution.
 	if cfg.CloudProvider != "" {
@@ -109,11 +109,11 @@ func WaitForAPI(client *clientset.Clientset) {
 		cs, err := client.ComponentStatuses().List(v1.ListOptions{})
 		if err != nil {
 			if apierrs.IsForbidden(err) {
-				fmt.Print("\r[apiclient] Waiting for the API server to create RBAC policies")
+				fmt.Println("[apiclient] Waiting for API server authorization")
 			}
 			return false, nil
 		}
-		fmt.Println("\n[apiclient] RBAC policies created")
 		// TODO(phase2) must revisit this when we implement HA
 		if len(cs.Items) < 3 {
 			return false, nil
@@ -82,7 +82,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
 		kubeAPIServer: componentPod(api.Container{
 			Name: kubeAPIServer,
 			Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-			Command: getAPIServerCommand(cfg),
+			Command: getAPIServerCommand(cfg, false),
 			VolumeMounts: volumeMounts,
 			LivenessProbe: componentProbe(8080, "/healthz"),
 			Resources: componentResources("250m"),
@@ -91,19 +91,17 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
 		kubeControllerManager: componentPod(api.Container{
 			Name: kubeControllerManager,
 			Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-			Command: getControllerManagerCommand(cfg),
+			Command: getControllerManagerCommand(cfg, false),
 			VolumeMounts: volumeMounts,
 			LivenessProbe: componentProbe(10252, "/healthz"),
 			Resources: componentResources("200m"),
 			Env: getProxyEnvVars(),
 		}, volumes...),
 		kubeScheduler: componentPod(api.Container{
 			Name: kubeScheduler,
 			Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-			// TODO: Using non-standard port here so self-hosted scheduler can come up:
-			// Use the regular port if this is not going to be a self-hosted deployment.
-			Command: getSchedulerCommand(cfg, 10260),
-			LivenessProbe: componentProbe(10260, "/healthz"),
+			Command: getSchedulerCommand(cfg, false),
+			LivenessProbe: componentProbe(10251, "/healthz"),
 			Resources: componentResources("100m"),
 			Env: getProxyEnvVars(),
 		}),
@@ -219,6 +217,23 @@ func pkiVolumeMount() api.VolumeMount {
 	}
 }
 
+func flockVolume() api.Volume {
+	return api.Volume{
+		Name: "var-lock",
+		VolumeSource: api.VolumeSource{
+			HostPath: &api.HostPathVolumeSource{Path: "/var/lock"},
+		},
+	}
+}
+
+func flockVolumeMount() api.VolumeMount {
+	return api.VolumeMount{
+		Name: "var-lock",
+		MountPath: "/var/lock",
+		ReadOnly: false,
+	}
+}
+
 func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
 	return api.Volume{
 		Name: "k8s",
@@ -286,8 +301,15 @@ func getComponentBaseCommand(component string) []string {
 	return []string{"kube-" + component}
 }
 
-func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
-	command := append(getComponentBaseCommand(apiServer),
+func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+	var command []string
+
+	// self-hosted apiserver needs to wait on a lock
+	if selfHosted {
+		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
+	}
+
+	command = append(getComponentBaseCommand(apiServer),
 		"--insecure-bind-address=127.0.0.1",
 		"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
 		"--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
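One thing worth noting in the hunk above: when selfHosted is true the flock prefix is stored in command, but the next statement reassigns command to append(getComponentBaseCommand(apiServer), ...), which builds on a fresh slice, so as written the prefix appears to be discarded rather than prepended. A variant that keeps it might look like the following sketch (the helper name is hypothetical, not part of this commit):

// wrapWithFlock is a hypothetical helper, not in this commit: it prepends the
// flock gate to a component command instead of overwriting it. flock(1)
// blocks (here up to 30s) until it holds an exclusive lock on lockPath, then
// execs the rest of the command line.
func wrapWithFlock(selfHosted bool, lockPath string, base []string) []string {
	if !selfHosted {
		return base
	}
	command := []string{"/usr/bin/flock", "--exclusive", "--timeout=30", lockPath}
	return append(command, base...)
}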
@@ -312,7 +334,11 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 
 	// Use first address we are given
 	if len(cfg.API.AdvertiseAddresses) > 0 {
-		command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
+		if selfHosted {
+			command = append(command, "--advertise-address=$(POD_IP)")
+		} else {
+			command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
+		}
 	}
 
 	if len(cfg.KubernetesVersion) != 0 {
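The literal $(POD_IP) above is not shell expansion: the kubelet substitutes $(VAR_NAME) references in a container's command and args from that container's environment, so it resolves against the POD_IP variable that getSelfHostedAPIServerEnv injects further down in this diff.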
@@ -361,8 +387,15 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 	return command
 }
 
-func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
-	command := append(getComponentBaseCommand(controllerManager),
+func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+	var command []string
+
+	// self-hosted controller-manager needs to wait on a lock
+	if selfHosted {
+		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"}
+	}
+
+	command = append(getComponentBaseCommand(controllerManager),
 		"--address=127.0.0.1",
 		"--leader-elect",
 		"--master=127.0.0.1:8080",
@@ -388,16 +421,25 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 	if cfg.Networking.PodSubnet != "" {
 		command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
 	}
 
 	return command
 }
 
-func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, schedulerPort int) []string {
-	return append(getComponentBaseCommand(scheduler),
+func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
+	var command []string
+
+	// self-hosted apiserver needs to wait on a lock
+	if selfHosted {
+		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
+	}
+
+	command = append(getComponentBaseCommand(scheduler),
 		"--address=127.0.0.1",
 		"--leader-elect",
 		"--master=127.0.0.1:8080",
-		fmt.Sprintf("--port=%d", schedulerPort),
 	)
+
+	return command
 }
 
 func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) []string {
@@ -421,3 +463,16 @@ func getProxyEnvVars() []api.EnvVar {
 	}
 	return envs
 }
+
+func getSelfHostedAPIServerEnv() []api.EnvVar {
+	podIPEnvVar := api.EnvVar{
+		Name: "POD_IP",
+		ValueFrom: &api.EnvVarSource{
+			FieldRef: &api.ObjectFieldSelector{
+				FieldPath: "status.podIP",
+			},
+		},
+	}
+
+	return append(getProxyEnvVars(), podIPEnvVar)
+}
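For context, POD_IP here is filled by the downward API from status.podIP, and the --advertise-address=$(POD_IP) argument added earlier resolves against it when the kubelet starts the container. A toy, self-contained model of that expansion (the IP value is illustrative only):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the container environment; in the real pod the
	// downward API fills POD_IP from status.podIP.
	env := map[string]string{"POD_IP": "10.32.0.5"}

	// The kubelet rewrites $(VAR) references in command/args from the
	// env before exec; this loop mimics that for the one flag we care about.
	arg := "--advertise-address=$(POD_IP)"
	for name, value := range env {
		arg = strings.ReplaceAll(arg, "$("+name+")", value)
	}
	fmt.Println(arg) // --advertise-address=10.32.0.5
}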
@@ -454,7 +454,7 @@ func TestGetAPIServerCommand(t *testing.T) {
 	}
 
 	for _, rt := range tests {
-		actual := getAPIServerCommand(rt.cfg)
+		actual := getAPIServerCommand(rt.cfg, false)
 		for i := range actual {
 			if actual[i] != rt.expected[i] {
 				t.Errorf(
@@ -523,7 +523,7 @@ func TestGetControllerManagerCommand(t *testing.T) {
 	}
 
 	for _, rt := range tests {
-		actual := getControllerManagerCommand(rt.cfg)
+		actual := getControllerManagerCommand(rt.cfg, false)
 		for i := range actual {
 			if actual[i] != rt.expected[i] {
 				t.Errorf(
@@ -553,7 +553,7 @@ func TestGetSchedulerCommand(t *testing.T) {
 	}
 
 	for _, rt := range tests {
-		actual := getSchedulerCommand(rt.cfg)
+		actual := getSchedulerCommand(rt.cfg, false)
 		for i := range actual {
 			if actual[i] != rt.expected[i] {
 				t.Errorf(
@@ -46,21 +46,26 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *c
 		volumeMounts = append(volumeMounts, pkiVolumeMount())
 	}
 
-	if err := LaunchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {
+	// Need lock for self-hosted
+	volumes = append(volumes, flockVolume())
+	volumeMounts = append(volumeMounts, flockVolumeMount())
+
+	if err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {
 		return err
 	}
 
-	if err := LaunchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {
+	if err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {
 		return err
 	}
 
-	if err := LaunchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {
+	if err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func LaunchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
 	start := time.Now()
 
 	apiServer := getAPIServerDS(cfg, volumes, volumeMounts)
@@ -72,13 +77,13 @@ func LaunchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clie
 		// TODO: This might be pointless, checking the pods is probably enough.
 		// It does however get us a count of how many there should be which may be useful
 		// with HA.
-		apiDS, err := client.DaemonSets(api.NamespaceSystem).Get(kubeAPIServer,
+		apiDS, err := client.DaemonSets(api.NamespaceSystem).Get("self-hosted-"+kubeAPIServer,
 			metav1.GetOptions{})
 		if err != nil {
-			fmt.Println("[debug] error getting apiserver DaemonSet:", err)
+			fmt.Println("[self-hosted] error getting apiserver DaemonSet:", err)
 			return false, nil
 		}
-		fmt.Printf("[debug] %s DaemonSet current=%d, desired=%d\n",
+		fmt.Printf("[self-hosted] %s DaemonSet current=%d, desired=%d\n",
 			kubeAPIServer,
 			apiDS.Status.CurrentNumberScheduled,
 			apiDS.Status.DesiredNumberScheduled)
@@ -90,25 +95,22 @@ func LaunchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clie
 		return true, nil
 	})
 
-	waitForPodsWithLabel(client, kubeAPIServer)
+	// Wait for self-hosted API server to take ownership
+	waitForPodsWithLabel(client, "self-hosted-"+kubeAPIServer, true)
 
-	apiServerStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
-		"manifests", kubeAPIServer+".json")
-	if err := os.Remove(apiServerStaticManifestPath); err != nil {
+	// Remove temporary API server
+	apiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer)
+	if err := os.RemoveAll(apiServerStaticManifestPath); err != nil {
 		return fmt.Errorf("unable to delete temporary API server manifest [%v]", err)
 	}
 
-	// Wait until kubernetes detects the static pod removal and our newly created
-	// API server comes online:
-	// TODO: Should we verify that either the API is down, or the static apiserver pod is gone before
-	// waiting?
 	WaitForAPI(client)
 
-	fmt.Printf("[debug] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds())
+	fmt.Printf("[self-hosted] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds())
 	return nil
 }
 
-func LaunchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
 	start := time.Now()
 
 	ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)
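The launch functions in this file all follow the same pivot shape: create the self-hosted object, wait for its pod via the k8s-app label, delete the temporary static manifest, and, for the apiserver only, wait for the API to answer again once the self-hosted copy takes over the port. Condensed into one sketch using names from this diff (the pivot helper itself is hypothetical and assumes the surrounding kubeadm package):

// pivot is a hypothetical condensation of
// launchSelfHosted{APIServer,ControllerManager,Scheduler}: the three differ
// mainly in the object they create and in whether the pod must already be
// Running before the static manifest is removed.
func pivot(client *clientset.Clientset, component string, mustBeRunning, waitForAPI bool) error {
	waitForPodsWithLabel(client, "self-hosted-"+component, mustBeRunning)
	if err := os.RemoveAll(buildStaticManifestFilepath(component)); err != nil {
		return fmt.Errorf("unable to delete temporary %s manifest [%v]", component, err)
	}
	if waitForAPI {
		WaitForAPI(client)
	}
	return nil
}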
@@ -116,105 +118,98 @@ func LaunchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, clie
 		return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err)
 	}
 
-	waitForPodsWithLabel(client, kubeControllerManager)
+	waitForPodsWithLabel(client, "self-hosted-"+kubeControllerManager, false)
 
-	ctrlMgrStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
-		"manifests", kubeControllerManager+".json")
-	if err := os.Remove(ctrlMgrStaticManifestPath); err != nil {
+	ctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager)
+	if err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil {
 		return fmt.Errorf("unable to delete temporary controller manager manifest [%v]", err)
 	}
 
-	fmt.Printf("[debug] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds())
+	fmt.Printf("[self-hosted] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds())
 	return nil
 
 }
 
-func LaunchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
+func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
 
 	start := time.Now()
 	scheduler := getSchedulerDeployment(cfg)
 	if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&scheduler); err != nil {
 		return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err)
 	}
 
-	waitForPodsWithLabel(client, kubeScheduler)
+	waitForPodsWithLabel(client, "self-hosted-"+kubeScheduler, false)
 
-	schedulerStaticManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir,
-		"manifests", kubeScheduler+".json")
-	if err := os.Remove(schedulerStaticManifestPath); err != nil {
+	schedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler)
+	if err := os.RemoveAll(schedulerStaticManifestPath); err != nil {
 		return fmt.Errorf("unable to delete temporary scheduler manifest [%v]", err)
 	}
 
-	fmt.Printf("[debug] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds())
+	fmt.Printf("[self-hosted] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds())
 	return nil
 }
 
 // waitForPodsWithLabel will lookup pods with the given label and wait until they are all
 // reporting status as running.
-func waitForPodsWithLabel(client *clientset.Clientset, appLabel string) {
+func waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) {
 	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
 		// TODO: Do we need a stronger label link than this?
 		listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
 		apiPods, err := client.Pods(api.NamespaceSystem).List(listOpts)
 		if err != nil {
-			fmt.Printf("[debug] error getting %s pods [%v]\n", appLabel, err)
+			fmt.Printf("[self-hosted] error getting %s pods [%v]\n", appLabel, err)
 			return false, nil
 		}
-		fmt.Printf("[debug] Found %d %s pods\n", len(apiPods.Items), appLabel)
+		fmt.Printf("[self-hosted] Found %d %s pods\n", len(apiPods.Items), appLabel)
 
 		// TODO: HA
 		if int32(len(apiPods.Items)) != 1 {
 			return false, nil
 		}
 		for _, pod := range apiPods.Items {
-			fmt.Printf("[debug] Pod %s status: %s\n", pod.Name, pod.Status.Phase)
-			if pod.Status.Phase != "Running" {
+			fmt.Printf("[self-hosted] Pod %s status: %s\n", pod.Name, pod.Status.Phase)
+			if mustBeRunning && pod.Status.Phase != "Running" {
 				return false, nil
 			}
 		}
 
 		return true, nil
 	})
 
-	return
 }
 
 // Sources from bootkube templates.go
-func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration,
-	volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {
+func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {
 
 	ds := ext.DaemonSet{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "extensions/v1beta1",
 			Kind: "DaemonSet",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: kubeAPIServer,
+			Name: "self-hosted-" + kubeAPIServer,
 			Namespace: "kube-system",
-			//Labels: map[string]string{"k8s-app": "kube-apiserver"},
+			Labels: map[string]string{"k8s-app": "self-hosted-" + kubeAPIServer},
 		},
 		Spec: ext.DaemonSetSpec{
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
-						// TODO: taken from bootkube, appears to be essential, without this
-						// we don't get an apiserver pod...
-						"k8s-app": kubeAPIServer,
+						"k8s-app": "self-hosted-" + kubeAPIServer,
 						"component": kubeAPIServer,
 						"tier": "control-plane",
 					},
+					Annotations: map[string]string{
+						v1.TolerationsAnnotationKey: getMasterToleration(),
+					},
 				},
 				Spec: v1.PodSpec{
-					// TODO: Make sure masters get this label
 					NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
 					HostNetwork: true,
 					Volumes: volumes,
 					Containers: []v1.Container{
 						{
-							Name: kubeAPIServer,
+							Name: "self-hosted-" + kubeAPIServer,
 							Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-							Command: getAPIServerCommand(cfg),
-							Env: getProxyEnvVars(),
+							Command: getAPIServerCommand(cfg, true),
+							Env: getSelfHostedAPIServerEnv(),
 							VolumeMounts: volumeMounts,
 							LivenessProbe: componentProbe(8080, "/healthz"),
 							Resources: componentResources("250m"),
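A detail worth noticing in this hunk: both the object names and the k8s-app labels gain the "self-hosted-" prefix. waitForPodsWithLabel selects on k8s-app, so the prefix presumably keeps the poll from matching anything belonging to the static generation while both are briefly present during the pivot. Concretely, the selector it builds for the apiserver looks like this (kubeAPIServer is the "kube-apiserver" constant used throughout kubeadm):

// Selector assembled by waitForPodsWithLabel for the self-hosted apiserver;
// pods labelled for the unprefixed static generation won't match it.
listOpts := v1.ListOptions{LabelSelector: "k8s-app=self-hosted-" + kubeAPIServer}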
@@ -227,24 +222,23 @@ func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration,
 	return ds
 }
 
-func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration,
-	volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {
-
-	cmDep := ext.Deployment{
+func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {
+	d := ext.Deployment{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "extensions/v1beta1",
 			Kind: "Deployment",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: kubeControllerManager,
+			Name: "self-hosted-" + kubeControllerManager,
 			Namespace: "kube-system",
+			Labels: map[string]string{"k8s-app": "self-hosted-" + kubeControllerManager},
 		},
 		Spec: ext.DeploymentSpec{
+			// TODO bootkube uses 2 replicas
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
-						// TODO: taken from bootkube, appears to be essential
-						"k8s-app": kubeControllerManager,
+						"k8s-app": "self-hosted-" + kubeControllerManager,
 						"component": kubeControllerManager,
 						"tier": "control-plane",
 					},
@@ -253,27 +247,74 @@ func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration,
 					},
 				},
 			Spec: v1.PodSpec{
-				// TODO: Make sure masters get this label
 				NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
 				HostNetwork: true,
 				Volumes: volumes,
 
 				Containers: []v1.Container{
 					{
-						Name: kubeControllerManager,
+						Name: "self-hosted-" + kubeControllerManager,
 						Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-						Command: getControllerManagerCommand(cfg),
+						Command: getControllerManagerCommand(cfg, true),
 						VolumeMounts: volumeMounts,
 						LivenessProbe: componentProbe(10252, "/healthz"),
 						Resources: componentResources("200m"),
 						Env: getProxyEnvVars(),
 					},
 				},
+				DNSPolicy: v1.DNSDefault,
+			},
+		},
+	},
+	}
+	return d
+}
+
+func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {
+	d := ext.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "extensions/v1beta1",
+			Kind: "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "self-hosted-" + kubeScheduler,
+			Namespace: "kube-system",
+			Labels: map[string]string{"k8s-app": "self-hosted-" + kubeScheduler},
+		},
+		Spec: ext.DeploymentSpec{
+			// TODO bootkube uses 2 replicas
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"k8s-app": "self-hosted-" + kubeScheduler,
+						"component": kubeScheduler,
+						"tier": "control-plane",
+					},
+					Annotations: map[string]string{
+						v1.TolerationsAnnotationKey: getMasterToleration(),
+					},
+				},
+				Spec: v1.PodSpec{
+					NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
+					HostNetwork: true,
+					Containers: []v1.Container{
+						{
+							Name: "self-hosted-" + kubeScheduler,
+							Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
+							Command: getSchedulerCommand(cfg, true),
+							LivenessProbe: componentProbe(10251, "/healthz"),
+							Resources: componentResources("100m"),
+							Env: getProxyEnvVars(),
+						},
+					},
 				},
 			},
 		},
 	}
-	return cmDep
+	return d
+}
+
+func buildStaticManifestFilepath(name string) string {
+	return path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests", name+".json")
 }
 
 func getMasterToleration() string {
@@ -288,47 +329,3 @@ func getMasterToleration() string {
 	}})
 	return string(masterToleration)
 }
-
-func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {
-
-	cmDep := ext.Deployment{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "extensions/v1beta1",
-			Kind: "Deployment",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: kubeScheduler,
-			Namespace: "kube-system",
-		},
-		Spec: ext.DeploymentSpec{
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"k8s-app": kubeScheduler,
-						"component": kubeScheduler,
-						"tier": "control-plane",
-					},
-					Annotations: map[string]string{
-						v1.TolerationsAnnotationKey: getMasterToleration(),
-					},
-				},
-				Spec: v1.PodSpec{
-					NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
-					HostNetwork: true,
-
-					Containers: []v1.Container{
-						{
-							Name: kubeScheduler,
-							Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
-							Command: getSchedulerCommand(cfg, 10251),
-							LivenessProbe: componentProbe(10251, "/healthz"),
-							Resources: componentResources("100m"),
-							Env: getProxyEnvVars(),
-						},
-					},
-				},
-			},
-		},
-	}
-	return cmDep
-}