package cluster

import (
	"context"
	"fmt"
	"net"
	"strings"

	"github.com/rancher/rke/authz"
	"github.com/rancher/rke/docker"
	"github.com/rancher/rke/hosts"
	"github.com/rancher/rke/k8s"
	"github.com/rancher/rke/log"
	"github.com/rancher/rke/pki"
	"github.com/rancher/rke/services"
	"github.com/rancher/types/apis/management.cattle.io/v3"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"gopkg.in/yaml.v2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/cert"
)

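// Cluster wraps the parsed RKE config together with the runtime state needed
// to drive a deployment: the classified host lists, the generated certificate
// bundle, the Kubernetes client, and the dialer factories used to reach the
// hosts' docker daemons.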
type Cluster struct {
	v3.RancherKubernetesEngineConfig `yaml:",inline"`
	ConfigPath                       string
	LocalKubeConfigPath              string
	EtcdHosts                        []*hosts.Host
	WorkerHosts                      []*hosts.Host
	ControlPlaneHosts                []*hosts.Host
	InactiveHosts                    []*hosts.Host
	KubeClient                       *kubernetes.Clientset
	KubernetesServiceIP              net.IP
	Certificates                     map[string]pki.CertificatePKI
	ClusterDomain                    string
	ClusterCIDR                      string
	ClusterDNSServer                 string
	DockerDialerFactory              hosts.DialerFactory
	LocalConnDialerFactory           hosts.DialerFactory
	PrivateRegistriesMap             map[string]v3.PrivateRegistry
	K8sWrapTransport                 k8s.WrapTransport
	UseKubectlDeploy                 bool
	UpdateWorkersOnly                bool
}

const (
	X509AuthenticationProvider = "x509"
	StateConfigMapName         = "cluster-state"
	UpdateStateTimeout         = 30
	GetStateTimeout            = 30
	KubernetesClientTimeOut    = 30
	NoneAuthorizationMode      = "none"
	LocalNodeAddress           = "127.0.0.1"
	LocalNodeHostname          = "localhost"
	LocalNodeUser              = "root"
)

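// DeployControlPlane brings up the etcd plane (unless external etcd URLs are
// configured) and then the control plane containers (kube-apiserver,
// kube-controller-manager, kube-scheduler and their sidekick) on all control
// plane hosts.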
func (c *Cluster) DeployControlPlane(ctx context.Context) error {
	// Deploy etcd plane
	etcdProcessHostMap := c.getEtcdProcessHostMap(nil)
	if len(c.Services.Etcd.ExternalURLs) > 0 {
		log.Infof(ctx, "[etcd] External etcd connection string has been specified, skipping etcd plane")
	} else {
		if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdProcessHostMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine); err != nil {
			return fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
		}
	}

	// Deploy control plane
	processMap := map[string]v3.Process{
		services.SidekickContainerName:       c.BuildSidecarProcess(),
		services.KubeAPIContainerName:        c.BuildKubeAPIProcess(),
		services.KubeControllerContainerName: c.BuildKubeControllerProcess(),
		services.SchedulerContainerName:      c.BuildSchedulerProcess(),
	}
	if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		processMap,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine); err != nil {
		return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
	}
	return nil
}

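// DeployWorkerPlane runs the worker plane (kubelet, kube-proxy and the nginx
// proxy) on every unique host in the cluster; kubelet processes are built
// per host since their arguments depend on the host.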
func (c *Cluster) DeployWorkerPlane(ctx context.Context) error {
	// Deploy worker plane
	processMap := map[string]v3.Process{
		services.SidekickContainerName:   c.BuildSidecarProcess(),
		services.KubeproxyContainerName:  c.BuildKubeProxyProcess(),
		services.NginxProxyContainerName: c.BuildProxyProcess(),
	}
	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	kubeletProcessHostMap := make(map[*hosts.Host]v3.Process)
	for _, host := range allHosts {
		kubeletProcessHostMap[host] = c.BuildKubeletProcess(host)
	}
	if err := services.RunWorkerPlane(ctx, allHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		processMap,
		kubeletProcessHostMap,
		c.Certificates,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine); err != nil {
		return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
	}
	return nil
}

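// ParseConfig unmarshals the YAML cluster file into an RKE config struct.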
func ParseConfig(clusterFile string) (*v3.RancherKubernetesEngineConfig, error) {
	logrus.Debugf("Parsing cluster file [%v]", clusterFile)
	var rkeConfig v3.RancherKubernetesEngineConfig
	if err := yaml.Unmarshal([]byte(clusterFile), &rkeConfig); err != nil {
		return nil, err
	}
	return &rkeConfig, nil
}

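// ParseCluster builds a Cluster from an already-parsed RKE config: it applies
// defaults, classifies and validates the hosts, derives the Kubernetes
// service IP and DNS settings, and indexes the configured private registries
// by URL.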
func ParseCluster(
	ctx context.Context,
	rkeConfig *v3.RancherKubernetesEngineConfig,
	clusterFilePath, configDir string,
	dockerDialerFactory,
	localConnDialerFactory hosts.DialerFactory,
	k8sWrapTransport k8s.WrapTransport) (*Cluster, error) {
	var err error
	c := &Cluster{
		RancherKubernetesEngineConfig: *rkeConfig,
		ConfigPath:                    clusterFilePath,
		DockerDialerFactory:           dockerDialerFactory,
		LocalConnDialerFactory:        localConnDialerFactory,
		PrivateRegistriesMap:          make(map[string]v3.PrivateRegistry),
		K8sWrapTransport:              k8sWrapTransport,
	}
	// Setting cluster defaults
	c.setClusterDefaults(ctx)

	if err := c.InvertIndexHosts(); err != nil {
		return nil, fmt.Errorf("Failed to classify hosts from config file: %v", err)
	}

	if err := c.ValidateCluster(); err != nil {
		return nil, fmt.Errorf("Failed to validate cluster: %v", err)
	}
	c.KubernetesServiceIP, err = pki.GetKubernetesServiceIP(c.Services.KubeAPI.ServiceClusterIPRange)
	if err != nil {
		return nil, fmt.Errorf("Failed to get Kubernetes Service IP: %v", err)
	}
	c.ClusterDomain = c.Services.Kubelet.ClusterDomain
	c.ClusterCIDR = c.Services.KubeController.ClusterCIDR
	c.ClusterDNSServer = c.Services.Kubelet.ClusterDNSServer
	if len(c.ConfigPath) == 0 {
		c.ConfigPath = pki.ClusterConfig
	}
	c.LocalKubeConfigPath = pki.GetLocalKubeConfig(c.ConfigPath, configDir)
	for _, pr := range c.PrivateRegistries {
		if pr.URL == "" {
			pr.URL = docker.DockerRegistryURL
		}
		c.PrivateRegistriesMap[pr.URL] = pr
	}
	return c, nil
}

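// rebuildLocalAdminConfig regenerates the local admin kubeconfig, trying each
// control plane host in turn until one answers as an active master.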
func rebuildLocalAdminConfig(ctx context.Context, kubeCluster *Cluster) error {
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	log.Infof(ctx, "[reconcile] Rebuilding and updating local kube config")
	var workingConfig, newConfig string
	currentKubeConfig := kubeCluster.Certificates[pki.KubeAdminCertName]
	caCrt := kubeCluster.Certificates[pki.CACertName].Certificate
	for _, cpHost := range kubeCluster.ControlPlaneHosts {
		if (currentKubeConfig == pki.CertificatePKI{}) {
			kubeCluster.Certificates = make(map[string]pki.CertificatePKI)
			newConfig = getLocalAdminConfigWithNewAddress(kubeCluster.LocalKubeConfigPath, cpHost.Address, kubeCluster.ClusterName)
		} else {
			kubeURL := fmt.Sprintf("https://%s:6443", cpHost.Address)
			caData := string(cert.EncodeCertPEM(caCrt))
			crtData := string(cert.EncodeCertPEM(currentKubeConfig.Certificate))
			keyData := string(cert.EncodePrivateKeyPEM(currentKubeConfig.Key))
			newConfig = pki.GetKubeConfigX509WithData(kubeURL, kubeCluster.ClusterName, pki.KubeAdminCertName, caData, crtData, keyData)
		}
		if err := pki.DeployAdminConfig(ctx, newConfig, kubeCluster.LocalKubeConfigPath); err != nil {
			return fmt.Errorf("Failed to redeploy local admin config with new host: %v", err)
		}
		workingConfig = newConfig
		if _, err := GetK8sVersion(kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err == nil {
			log.Infof(ctx, "[reconcile] host [%s] is active master on the cluster", cpHost.Address)
			break
		}
	}
	currentKubeConfig.Config = workingConfig
	kubeCluster.Certificates[pki.KubeAdminCertName] = currentKubeConfig
	return nil
}

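// isLocalConfigWorking reports whether the local kubeconfig can still reach
// the cluster.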
func isLocalConfigWorking(ctx context.Context, localKubeConfigPath string, k8sWrapTransport k8s.WrapTransport) bool {
	if _, err := GetK8sVersion(localKubeConfigPath, k8sWrapTransport); err != nil {
		log.Infof(ctx, "[reconcile] Local config is not valid, rebuilding admin config")
		return false
	}
	return true
}

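// getLocalConfigAddress extracts the server address from the local
// kubeconfig; config.Host has the form https://<address>:<port>, so the
// second ":"-separated field minus its leading "//" is the address.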
func getLocalConfigAddress(localConfigPath string) (string, error) {
	config, err := clientcmd.BuildConfigFromFlags("", localConfigPath)
	if err != nil {
		return "", err
	}
	splitAddress := strings.Split(config.Host, ":")
	address := splitAddress[1]
	// Strip the leading "//" left over from the URL scheme
	return address[2:], nil
}

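// getLocalAdminConfigWithNewAddress rebuilds the admin kubeconfig from the
// credentials stored in the existing local config, pointed at a new control
// plane address.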
func getLocalAdminConfigWithNewAddress(localConfigPath, cpAddress string, clusterName string) string {
	config, _ := clientcmd.BuildConfigFromFlags("", localConfigPath)
	if config == nil {
		return ""
	}
	config.Host = fmt.Sprintf("https://%s:6443", cpAddress)
	return pki.GetKubeConfigX509WithData(
		"https://"+cpAddress+":6443",
		clusterName,
		pki.KubeAdminCertName,
		string(config.CAData),
		string(config.CertData),
		string(config.KeyData))
}

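// ApplyAuthzResources applies the authorization-related resources: the
// ServiceAccount used by the job deployer and, when RBAC is enabled, the node
// ClusterRoleBinding and (if requested) the default PodSecurityPolicy with
// its ClusterRole and ClusterRoleBinding.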
func ApplyAuthzResources(ctx context.Context, rkeConfig v3.RancherKubernetesEngineConfig, clusterFilePath, configDir string, k8sWrapTransport k8s.WrapTransport) error {
	// Dialer factories are not needed here; we only run k8s jobs, not docker
	kubeCluster, err := ParseCluster(ctx, &rkeConfig, clusterFilePath, configDir, nil, nil, k8sWrapTransport)
	if err != nil {
		return err
	}
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	if err := authz.ApplyJobDeployerServiceAccount(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
		return fmt.Errorf("Failed to apply the ServiceAccount needed for job execution: %v", err)
	}
	if kubeCluster.Authorization.Mode == NoneAuthorizationMode {
		return nil
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode {
		if err := authz.ApplySystemNodeClusterRoleBinding(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply the ClusterRoleBinding needed for node authorization: %v", err)
		}
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode && kubeCluster.Services.KubeAPI.PodSecurityPolicy {
		if err := authz.ApplyDefaultPodSecurityPolicy(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy: %v", err)
		}
		if err := authz.ApplyDefaultPodSecurityPolicyRole(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy ClusterRole and ClusterRoleBinding: %v", err)
		}
	}
	return nil
}

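// deployAddons deploys the built-in Kubernetes add-ons followed by any
// user-defined add-ons.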
func (c *Cluster) deployAddons(ctx context.Context) error {
	if err := c.deployK8sAddOns(ctx); err != nil {
		return err
	}
	return c.deployUserAddOns(ctx)
}

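// SyncLabelsAndTaints reconciles node address annotations, labels and taints
// for every unique host against the desired state from the cluster config.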
func (c *Cluster) SyncLabelsAndTaints(ctx context.Context) error {
	if len(c.ControlPlaneHosts) > 0 {
		log.Infof(ctx, "[sync] Syncing nodes Labels and Taints")
		k8sClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
		if err != nil {
			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
		}
		for _, host := range hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts) {
			if err := k8s.SetAddressesAnnotations(k8sClient, host.HostnameOverride, host.InternalAddress, host.Address); err != nil {
				return err
			}
			if err := k8s.SyncLabels(k8sClient, host.HostnameOverride, host.ToAddLabels, host.ToDelLabels); err != nil {
				return err
			}
			// Taints are not added by the user
			if err := k8s.SyncTaints(k8sClient, host.HostnameOverride, host.ToAddTaints, host.ToDelTaints); err != nil {
				return err
			}
		}
		log.Infof(ctx, "[sync] Successfully synced nodes Labels and Taints")
	}
	return nil
}

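// PrePullK8sImages pulls the Kubernetes system image in parallel on all hosts
// marked for a worker update, reusing a local copy when one already exists.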
func (c *Cluster) PrePullK8sImages(ctx context.Context) error {
	log.Infof(ctx, "Pre-pulling kubernetes images")
	var errgrp errgroup.Group
	// Named uniqueHosts to avoid shadowing the hosts package
	uniqueHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	for _, host := range uniqueHosts {
		if !host.UpdateWorker {
			continue
		}
		runHost := host
		errgrp.Go(func() error {
			return docker.UseLocalOrPull(ctx, runHost.DClient, runHost.Address, c.SystemImages.Kubernetes, "pre-deploy", c.PrivateRegistriesMap)
		})
	}
	if err := errgrp.Wait(); err != nil {
		return err
	}
	log.Infof(ctx, "Kubernetes images pulled successfully")
	return nil
}

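// ConfigureCluster deploys the network plugin and the add-ons once a control
// plane exists, using the supplied certificate bundle.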
func ConfigureCluster(ctx context.Context, rkeConfig v3.RancherKubernetesEngineConfig, crtBundle map[string]pki.CertificatePKI, clusterFilePath, configDir string, k8sWrapTransport k8s.WrapTransport, useKubectl bool) error {
	// Dialer factories are not needed here; we only run k8s jobs, not docker
	kubeCluster, err := ParseCluster(ctx, &rkeConfig, clusterFilePath, configDir, nil, nil, k8sWrapTransport)
	if err != nil {
		return err
	}
	kubeCluster.UseKubectlDeploy = useKubectl
	if len(kubeCluster.ControlPlaneHosts) > 0 {
		kubeCluster.Certificates = crtBundle
		if err := kubeCluster.deployNetworkPlugin(ctx); err != nil {
			return err
		}
		return kubeCluster.deployAddons(ctx)
	}
	return nil
}

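// getEtcdProcessHostMap builds the etcd process definition for every current
// etcd host, skipping hosts that still have to be added as etcd members.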
func (c *Cluster) getEtcdProcessHostMap(readyEtcdHosts []*hosts.Host) map[*hosts.Host]v3.Process {
	etcdProcessHostMap := make(map[*hosts.Host]v3.Process)
	for _, host := range c.EtcdHosts {
		if !host.ToAddEtcdMember {
			etcdProcessHostMap[host] = c.BuildEtcdProcess(host, readyEtcdHosts)
		}
	}
	return etcdProcessHostMap
}