package cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"reflect"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	ghodssyaml "github.com/ghodss/yaml"
	"github.com/rancher/norman/types/convert"
	"github.com/rancher/rke/authz"
	"github.com/rancher/rke/docker"
	"github.com/rancher/rke/hosts"
	"github.com/rancher/rke/k8s"
	"github.com/rancher/rke/log"
	"github.com/rancher/rke/metadata"
	"github.com/rancher/rke/pki"
	"github.com/rancher/rke/pki/cert"
	"github.com/rancher/rke/services"
	"github.com/rancher/rke/util"
	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"gopkg.in/yaml.v2"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/transport"
)

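// Cluster holds the parsed RKE configuration along with the runtime state
// used to provision it: classified host groups, generated certificates,
// dialer factories, and the paths of the local kubeconfig and state files.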
type Cluster struct {
	AuthnStrategies                  map[string]bool
	ConfigPath                       string
	ConfigDir                        string
	CloudConfigFile                  string
	ControlPlaneHosts                []*hosts.Host
	Certificates                     map[string]pki.CertificatePKI
	CertificateDir                   string
	ClusterDomain                    string
	ClusterCIDR                      string
	ClusterDNSServer                 string
	DinD                             bool
	DockerDialerFactory              hosts.DialerFactory
	EtcdHosts                        []*hosts.Host
	EtcdReadyHosts                   []*hosts.Host
	ForceDeployCerts                 bool
	InactiveHosts                    []*hosts.Host
	K8sWrapTransport                 transport.WrapperFunc
	KubeClient                       *kubernetes.Clientset
	KubernetesServiceIP              net.IP
	LocalKubeConfigPath              string
	LocalConnDialerFactory           hosts.DialerFactory
	PrivateRegistriesMap             map[string]v3.PrivateRegistry
	StateFilePath                    string
	UpdateWorkersOnly                bool
	UseKubectlDeploy                 bool
	v3.RancherKubernetesEngineConfig `yaml:",inline"`
	WorkerHosts                      []*hosts.Host
	EncryptionConfig                 encryptionConfig
}

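// encryptionConfig tracks secrets encryption-at-rest state: whether existing
// secrets need to be rewritten, whether the encryption key is being rotated,
// and the rendered encryption provider file.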
type encryptionConfig struct {
	RewriteSecrets         bool
	RotateKey              bool
	EncryptionProviderFile string
}

const (
	AuthnX509Provider      = "x509"
	AuthnWebhookProvider   = "webhook"
	StateConfigMapName     = "cluster-state"
	FullStateConfigMapName = "full-cluster-state"
	UpdateStateTimeout     = 30
	GetStateTimeout        = 30
	SyncWorkers            = 10
	NoneAuthorizationMode  = "none"
	LocalNodeAddress       = "127.0.0.1"
	LocalNodeHostname      = "localhost"
	LocalNodeUser          = "root"
	CloudProvider          = "CloudProvider"
	ControlPlane           = "controlPlane"
	KubeAppLabel           = "k8s-app"
	AppLabel               = "app"
	NameLabel              = "name"

	WorkerThreads = util.WorkerThreads

	serviceAccountTokenFileParam = "service-account-key-file"

	SystemNamespace = "kube-system"
)

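// DeployControlPlane builds node plans for the etcd and controlplane hosts
// and brings both planes up. The etcd plane is skipped when an external etcd
// connection string is configured.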
func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
	// Deploy Etcd Plane
	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build etcd node plan map
	for _, etcdHost := range c.EtcdHosts {
		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptionData)
	}

	if len(c.Services.Etcd.ExternalURLs) > 0 {
		log.Infof(ctx, "[etcd] External etcd connection string has been specified, skipping etcd plane")
	} else {
		if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdNodePlanMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine, c.Services.Etcd, c.Certificates); err != nil {
			return fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
		}
	}

	// Deploy Control plane
	cpNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build cp node plan map
	for _, cpHost := range c.ControlPlaneHosts {
		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptionData)
	}
	if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		cpNodePlanMap,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine,
		c.Certificates); err != nil {
		return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
	}

	return nil
}

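// DeployWorkerPlane builds a node plan for every unique host in the cluster
// and brings up the worker plane components on all of them.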
func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
	// Deploy Worker plane
	workerNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build worker node plan map
	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	for _, workerHost := range allHosts {
		workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptionData)
	}
	if err := services.RunWorkerPlane(ctx, allHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		workerNodePlanMap,
		c.Certificates,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine); err != nil {
		return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
	}
	return nil
}

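// parseAuditLogConfig extracts the audit policy from the raw cluster.yml.
// The policy maps to the k8s auditv1.Policy type, which does not survive the
// generic YAML unmarshal, so the
// services.kube-api.audit_log.configuration.policy subtree is re-parsed here
// and decoded through the audit/v1 codec. The expected cluster.yml layout is
// roughly (illustrative sketch):
//
//	services:
//	  kube-api:
//	    audit_log:
//	      enabled: true
//	      configuration:
//	        policy:
//	          apiVersion: audit.k8s.io/v1
//	          kind: Policy
//	          rules:
//	          - level: Metadata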
func parseAuditLogConfig(clusterFile string, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	if rkeConfig.Services.KubeAPI.AuditLog != nil &&
		rkeConfig.Services.KubeAPI.AuditLog.Enabled &&
		rkeConfig.Services.KubeAPI.AuditLog.Configuration.Policy == nil {
		return nil
	}
	logrus.Debugf("audit log policy found in cluster.yml")
	var r map[string]interface{}
	err := ghodssyaml.Unmarshal([]byte(clusterFile), &r)
	if err != nil {
		return fmt.Errorf("error unmarshalling: %v", err)
	}
	if r["services"] == nil {
		return nil
	}
	services := r["services"].(map[string]interface{})
	if services["kube-api"] == nil {
		return nil
	}
	kubeapi := services["kube-api"].(map[string]interface{})
	if kubeapi["audit_log"] == nil {
		return nil
	}
	auditlog := kubeapi["audit_log"].(map[string]interface{})
	if auditlog["configuration"] == nil {
		return nil
	}
	alc := auditlog["configuration"].(map[string]interface{})
	if alc["policy"] == nil {
		return nil
	}
	policyBytes, err := json.Marshal(alc["policy"])
	if err != nil {
		return fmt.Errorf("error marshalling audit policy: %v", err)
	}
	scheme := runtime.NewScheme()
	err = auditv1.AddToScheme(scheme)
	if err != nil {
		return fmt.Errorf("error adding to scheme: %v", err)
	}
	codecs := serializer.NewCodecFactory(scheme)
	p := auditv1.Policy{}
	err = runtime.DecodeInto(codecs.UniversalDecoder(), policyBytes, &p)
	if err != nil || p.Kind != "Policy" {
		return fmt.Errorf("error decoding audit policy: %v", err)
	}
	rkeConfig.Services.KubeAPI.AuditLog.Configuration.Policy = &p
	return nil
}

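// parseAdmissionConfig re-parses services.kube-api.admission_configuration
// from the raw cluster.yml and decodes it through the apiserver/v1alpha1
// codec, for the same reason as parseAuditLogConfig: the k8s type does not
// round-trip through the generic YAML unmarshal.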
func parseAdmissionConfig(clusterFile string, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	if rkeConfig.Services.KubeAPI.AdmissionConfiguration == nil {
		return nil
	}
	logrus.Debugf("admission configuration found in cluster.yml")
	var r map[string]interface{}
	err := ghodssyaml.Unmarshal([]byte(clusterFile), &r)
	if err != nil {
		return fmt.Errorf("error unmarshalling: %v", err)
	}
	if r["services"] == nil {
		return nil
	}
	services := r["services"].(map[string]interface{})
	if services["kube-api"] == nil {
		return nil
	}
	kubeapi := services["kube-api"].(map[string]interface{})
	if kubeapi["admission_configuration"] == nil {
		return nil
	}
	data, err := json.Marshal(kubeapi["admission_configuration"])
	if err != nil {
		return fmt.Errorf("error marshalling admission configuration: %v", err)
	}
	scheme := runtime.NewScheme()
	err = apiserverv1alpha1.AddToScheme(scheme)
	if err != nil {
		return fmt.Errorf("error adding to scheme: %v", err)
	}
	err = scheme.SetVersionPriority(apiserverv1alpha1.SchemeGroupVersion)
	if err != nil {
		return fmt.Errorf("error setting version priority: %v", err)
	}
	codecs := serializer.NewCodecFactory(scheme)
	decoder := codecs.UniversalDecoder(apiserverv1alpha1.SchemeGroupVersion)
	decodedObj, err := runtime.Decode(decoder, data)
	if err != nil {
		return fmt.Errorf("error decoding data: %v", err)
	}
	decodedConfig, ok := decodedObj.(*apiserverv1alpha1.AdmissionConfiguration)
	if !ok {
		return fmt.Errorf("unexpected type: %T", decodedObj)
	}
	rkeConfig.Services.KubeAPI.AdmissionConfiguration = decodedConfig
	return nil
}

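// parseIngressConfig re-parses the ingress extra_envs, extra_volumes, and
// extra_volume_mounts lists from the raw cluster.yml, since their k8s-backed
// types are not recovered by the initial unmarshal.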
func parseIngressConfig(clusterFile string, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	var r map[string]interface{}
	err := ghodssyaml.Unmarshal([]byte(clusterFile), &r)
	if err != nil {
		return fmt.Errorf("[parseIngressConfig] error unmarshalling ingress config: %v", err)
	}
	// Skip if no ingress section is present in the file.
	if r["ingress"] == nil {
		return nil
	}
	ingressMap := convert.ToMapInterface(r["ingress"])
	if err := parseIngressExtraEnv(ingressMap, rkeConfig); err != nil {
		return err
	}
	if err := parseIngressExtraVolumes(ingressMap, rkeConfig); err != nil {
		return err
	}
	if err := parseIngressExtraVolumeMounts(ingressMap, rkeConfig); err != nil {
		return err
	}
	return nil
}

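// parseIngressExtraEnv copies the ingress extra_envs list into the typed
// v3.ExtraEnv slice, round-tripping the raw map through JSON.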
func parseIngressExtraEnv(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	extraEnvs, ok := ingressMap["extra_envs"]
	if !ok {
		return nil
	}
	ingressEnvBytes, err := json.Marshal(extraEnvs)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraEnv] error marshalling ingress config extraEnvs: %v", err)
	}
	var envs []v3.ExtraEnv
	err = json.Unmarshal(ingressEnvBytes, &envs)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraEnv] error unmarshaling ingress config extraEnvs: %v", err)
	}
	rkeConfig.Ingress.ExtraEnvs = envs
	return nil
}

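// parseIngressExtraVolumes copies the ingress extra_volumes list into the
// typed v3.ExtraVolume slice, round-tripping the raw map through JSON.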
func parseIngressExtraVolumes(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	extraVolumes, ok := ingressMap["extra_volumes"]
	if !ok {
		return nil
	}
	ingressVolBytes, err := json.Marshal(extraVolumes)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumes] error marshalling ingress config extraVolumes: %v", err)
	}
	var volumes []v3.ExtraVolume
	err = json.Unmarshal(ingressVolBytes, &volumes)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumes] error unmarshaling ingress config extraVolumes: %v", err)
	}
	rkeConfig.Ingress.ExtraVolumes = volumes
	return nil
}

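// parseIngressExtraVolumeMounts copies the ingress extra_volume_mounts list
// into the typed v3.ExtraVolumeMount slice, round-tripping the raw map
// through JSON.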
func parseIngressExtraVolumeMounts(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	extraVolMounts, ok := ingressMap["extra_volume_mounts"]
	if !ok {
		return nil
	}
	ingressVolMountBytes, err := json.Marshal(extraVolMounts)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumeMounts] error marshalling ingress config extraVolumeMounts: %v", err)
	}
	var volumeMounts []v3.ExtraVolumeMount
	err = json.Unmarshal(ingressVolMountBytes, &volumeMounts)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumeMounts] error unmarshaling ingress config extraVolumeMounts: %v", err)
	}
	rkeConfig.Ingress.ExtraVolumeMounts = volumeMounts
	return nil
}

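// ParseConfig parses a cluster.yml payload into a
// RancherKubernetesEngineConfig, then re-parses the sections backed by k8s
// types (custom encryption config, admission configuration, audit policy,
// and ingress extras) that the plain YAML unmarshal cannot populate.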
func ParseConfig(clusterFile string) (*v3.RancherKubernetesEngineConfig, error) {
	logrus.Debugf("Parsing cluster file [%v]", clusterFile)
	var rkeConfig v3.RancherKubernetesEngineConfig

	// The customConfig is mapped to a k8s type, which doesn't unmarshal well
	// because it has a nested struct and no yaml tags. Therefore, we have to
	// re-parse it and assign it correctly. This only affects the rke CLI,
	// since in the Rancher use case rkeConfig is passed from Rancher directly.
	clusterFile, secretConfig, err := resolveCustomEncryptionConfig(clusterFile)
	if err != nil {
		return nil, err
	}
	if err := yaml.Unmarshal([]byte(clusterFile), &rkeConfig); err != nil {
		return nil, err
	}

	if isEncryptionEnabled(&rkeConfig) && secretConfig != nil {
		rkeConfig.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig = secretConfig
	}
	if err := parseAdmissionConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing admission config: %v", err)
	}
	if err := parseAuditLogConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing audit log config: %v", err)
	}

	if err := parseIngressConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing ingress config: %v", err)
	}
	return &rkeConfig, nil
}

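// InitClusterObject builds a Cluster from the parsed rkeConfig and external
// flags: it resolves config, state, and certificate paths, loads or derives
// the secrets encryption provider file, applies cluster defaults, extracts
// network options, registers the cloud provider, classifies hosts into their
// groups, and validates the resulting cluster.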
func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, flags ExternalFlags, encryptConfig string) (*Cluster, error) {
	// basic cluster object from rkeConfig
	var err error
	c := &Cluster{
		AuthnStrategies:               make(map[string]bool),
		RancherKubernetesEngineConfig: *rkeConfig,
		ConfigPath:                    flags.ClusterFilePath,
		ConfigDir:                     flags.ConfigDir,
		DinD:                          flags.DinD,
		CertificateDir:                flags.CertificateDir,
		StateFilePath:                 GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir),
		PrivateRegistriesMap:          make(map[string]v3.PrivateRegistry),
		EncryptionConfig: encryptionConfig{
			EncryptionProviderFile: encryptConfig,
		},
	}
	if metadata.K8sVersionToRKESystemImages == nil {
		metadata.InitMetadata(ctx)
	}
	if len(c.ConfigPath) == 0 {
		c.ConfigPath = pki.ClusterConfig
	}
	// set kube_config, state file, and certificate dir
	c.LocalKubeConfigPath = pki.GetLocalKubeConfig(c.ConfigPath, c.ConfigDir)
	c.StateFilePath = GetStateFilePath(c.ConfigPath, c.ConfigDir)
	if len(c.CertificateDir) == 0 {
		c.CertificateDir = GetCertificateDirPath(c.ConfigPath, c.ConfigDir)
	}
	// We don't manage custom configuration; if it's there, we just use it.
	if isEncryptionCustomConfig(rkeConfig) {
		if c.EncryptionConfig.EncryptionProviderFile, err = c.readEncryptionCustomConfig(); err != nil {
			return nil, err
		}
	} else if isEncryptionEnabled(rkeConfig) && c.EncryptionConfig.EncryptionProviderFile == "" {
		if c.EncryptionConfig.EncryptionProviderFile, err = c.getEncryptionProviderFile(); err != nil {
			return nil, err
		}
	}

	// Setting cluster defaults
	err = c.setClusterDefaults(ctx, flags)
	if err != nil {
		return nil, err
	}
	// extract cluster network configuration
	if err = c.setNetworkOptions(); err != nil {
		return nil, fmt.Errorf("failed to set network options: %v", err)
	}

	// Register cloud provider
	if err := c.setCloudProvider(); err != nil {
		return nil, fmt.Errorf("Failed to register cloud provider: %v", err)
	}
	// set hosts groups
	if err := c.InvertIndexHosts(); err != nil {
		return nil, fmt.Errorf("Failed to classify hosts from config file: %v", err)
	}
	// validate cluster configuration
	if err := c.ValidateCluster(ctx); err != nil {
		return nil, fmt.Errorf("Failed to validate cluster: %v", err)
	}
	return c, nil
}

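// setNetworkOptions derives the Kubernetes service IP, cluster domain,
// cluster CIDR, and cluster DNS server from the services configuration.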
func (c *Cluster) setNetworkOptions() error {
	var err error
	c.KubernetesServiceIP, err = pki.GetKubernetesServiceIP(c.Services.KubeAPI.ServiceClusterIPRange)
	if err != nil {
		return fmt.Errorf("Failed to get Kubernetes Service IP: %v", err)
	}
	c.ClusterDomain = c.Services.Kubelet.ClusterDomain
	c.ClusterCIDR = c.Services.KubeController.ClusterCIDR
	c.ClusterDNSServer = c.Services.Kubelet.ClusterDNSServer
	return nil
}

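// SetupDialers wires the Docker, local-connection, and k8s transport dialers
// into the cluster, wrapping the k8s transport through the bastion host when
// one is configured.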
func (c *Cluster) SetupDialers(ctx context.Context, dialersOptions hosts.DialersOptions) error {
	c.DockerDialerFactory = dialersOptions.DockerDialerFactory
	c.LocalConnDialerFactory = dialersOptions.LocalConnDialerFactory
	c.K8sWrapTransport = dialersOptions.K8sWrapTransport
	// Create k8s wrap transport for bastion host
	if len(c.BastionHost.Address) > 0 {
		var err error
		c.K8sWrapTransport, err = hosts.BastionHostWrapTransport(c.BastionHost)
		if err != nil {
			return err
		}
	}
	return nil
}

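// RebuildKubeconfig rebuilds the local admin kubeconfig for the cluster.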
func RebuildKubeconfig(ctx context.Context, kubeCluster *Cluster) error {
	return rebuildLocalAdminConfig(ctx, kubeCluster)
}

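// rebuildLocalAdminConfig regenerates the local admin kubeconfig against each
// controlplane host in turn, stopping at the first host that answers as an
// active master.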
func rebuildLocalAdminConfig(ctx context.Context, kubeCluster *Cluster) error {
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	log.Infof(ctx, "[reconcile] Rebuilding and updating local kube config")
	var workingConfig, newConfig string
	currentKubeConfig := kubeCluster.Certificates[pki.KubeAdminCertName]
	caCrt := kubeCluster.Certificates[pki.CACertName].Certificate
	for _, cpHost := range kubeCluster.ControlPlaneHosts {
		if (currentKubeConfig == pki.CertificatePKI{}) {
			kubeCluster.Certificates = make(map[string]pki.CertificatePKI)
			newConfig = getLocalAdminConfigWithNewAddress(kubeCluster.LocalKubeConfigPath, cpHost.Address, kubeCluster.ClusterName)
		} else {
			kubeURL := fmt.Sprintf("https://%s:6443", cpHost.Address)
			caData := string(cert.EncodeCertPEM(caCrt))
			crtData := string(cert.EncodeCertPEM(currentKubeConfig.Certificate))
			keyData := string(cert.EncodePrivateKeyPEM(currentKubeConfig.Key))
			newConfig = pki.GetKubeConfigX509WithData(kubeURL, kubeCluster.ClusterName, pki.KubeAdminCertName, caData, crtData, keyData)
		}
		if err := pki.DeployAdminConfig(ctx, newConfig, kubeCluster.LocalKubeConfigPath); err != nil {
			return fmt.Errorf("Failed to redeploy local admin config with new host: %v", err)
		}
		workingConfig = newConfig
		if _, err := GetK8sVersion(kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err == nil {
			log.Infof(ctx, "[reconcile] host [%s] is active master on the cluster", cpHost.Address)
			break
		}
	}
	currentKubeConfig.Config = workingConfig
	kubeCluster.Certificates[pki.KubeAdminCertName] = currentKubeConfig
	return nil
}

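// getLocalConfigAddress returns the server address stored in the local
// kubeconfig, e.g. "1.2.3.4" for a host of "https://1.2.3.4:6443".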
func getLocalConfigAddress(localConfigPath string) (string, error) {
	config, err := clientcmd.BuildConfigFromFlags("", localConfigPath)
	if err != nil {
		return "", err
	}
	// config.Host looks like "https://<address>:<port>"; take the middle
	// segment and strip the leading "//".
	splitAddress := strings.Split(config.Host, ":")
	address := splitAddress[1]
	return address[2:], nil
}

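// getLocalAdminConfigWithNewAddress rewrites the local admin kubeconfig so it
// points at the given controlplane address, reusing the x509 data already on
// disk. It returns an empty string for token-based (non-x509) configs.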
func getLocalAdminConfigWithNewAddress(localConfigPath, cpAddress string, clusterName string) string {
	config, _ := clientcmd.BuildConfigFromFlags("", localConfigPath)
	if config == nil || config.BearerToken != "" {
		return ""
	}
	config.Host = fmt.Sprintf("https://%s:6443", cpAddress)
	return pki.GetKubeConfigX509WithData(
		"https://"+cpAddress+":6443",
		clusterName,
		pki.KubeAdminCertName,
		string(config.CAData),
		string(config.CertData),
		string(config.KeyData))
}

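// ApplyAuthzResources applies the authorization resources the cluster needs:
// the job-deployer ServiceAccount and, under RBAC, the system-node
// ClusterRoleBinding, the kube-apiserver proxy ClusterRole, and (when pod
// security policies are enabled) the default PodSecurityPolicy with its role
// and binding.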
func ApplyAuthzResources(ctx context.Context, rkeConfig v3.RancherKubernetesEngineConfig, flags ExternalFlags, dialersOptions hosts.DialersOptions) error {
	// Dialer factories are not needed here, since we are not using Docker, only k8s jobs
	kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "")
	if err != nil {
		return err
	}
	if err := kubeCluster.SetupDialers(ctx, dialersOptions); err != nil {
		return err
	}
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	// Print proxy environment variables as we are directly contacting the cluster
	util.PrintProxyEnvVars()
	if err := authz.ApplyJobDeployerServiceAccount(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
		return fmt.Errorf("Failed to apply the ServiceAccount needed for job execution: %v", err)
	}
	if kubeCluster.Authorization.Mode == NoneAuthorizationMode {
		return nil
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode {
		if err := authz.ApplySystemNodeClusterRoleBinding(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply the ClusterRoleBinding needed for node authorization: %v", err)
		}
		if err := authz.ApplyKubeAPIClusterRole(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply the ClusterRole and Binding needed for node kubeapi proxy: %v", err)
		}
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode && kubeCluster.Services.KubeAPI.PodSecurityPolicy {
		if err := authz.ApplyDefaultPodSecurityPolicy(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy: %v", err)
		}
		if err := authz.ApplyDefaultPodSecurityPolicyRole(ctx, kubeCluster.LocalKubeConfigPath, SystemNamespace, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy ClusterRole and ClusterRoleBinding: %v", err)
		}
	}
	return nil
}

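// deployAddons deploys the built-in k8s addons followed by any user-defined
// addons; user addon failures are fatal only when marked critical.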
func (c *Cluster) deployAddons(ctx context.Context, data map[string]interface{}) error {
	if err := c.deployK8sAddOns(ctx, data); err != nil {
		return err
	}
	if err := c.deployUserAddOns(ctx); err != nil {
		if err, ok := err.(*addonError); ok && err.isCritical {
			return err
		}
		log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", UserAddonsIncludeResourceName, err)
	}
	return nil
}

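// SyncLabelsAndTaints reconciles node labels and taints across the cluster:
// it cleans up controlplane nodes scheduled for removal, syncs taints, and
// fans the per-node sync out over SyncWorkers goroutines.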
func (c *Cluster) SyncLabelsAndTaints(ctx context.Context, currentCluster *Cluster) error {
	// Handle issue when deleting all controlplane nodes https://github.com/rancher/rancher/issues/15810
	if currentCluster != nil {
		cpToDelete := hosts.GetToDeleteHosts(currentCluster.ControlPlaneHosts, c.ControlPlaneHosts, c.InactiveHosts, false)
		if len(cpToDelete) == len(currentCluster.ControlPlaneHosts) {
			log.Infof(ctx, "[sync] Cleaning left control plane nodes from reconciliation")
			for _, toDeleteHost := range cpToDelete {
				if err := cleanControlNode(ctx, c, currentCluster, toDeleteHost); err != nil {
					return err
				}
			}
		}
	}

	// sync node taints. Add or remove taints from hosts
	syncTaints(ctx, currentCluster, c)

	if len(c.ControlPlaneHosts) > 0 {
		log.Infof(ctx, "[sync] Syncing nodes Labels and Taints")
		k8sClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
		if err != nil {
			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
		}
		hostList := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
		var errgrp errgroup.Group
		hostQueue := make(chan *hosts.Host, len(hostList))
		for _, host := range hostList {
			hostQueue <- host
		}
		close(hostQueue)

		for i := 0; i < SyncWorkers; i++ {
			w := i
			errgrp.Go(func() error {
				var errs []error
				for host := range hostQueue {
					logrus.Debugf("worker [%d] starting sync for node [%s]", w, host.HostnameOverride)
					if err := setNodeAnnotationsLabelsTaints(k8sClient, host); err != nil {
						errs = append(errs, err)
					}
				}
				if len(errs) > 0 {
					return fmt.Errorf("%v", errs)
				}
				return nil
			})
		}
		if err := errgrp.Wait(); err != nil {
			return err
		}
		log.Infof(ctx, "[sync] Successfully synced nodes Labels and Taints")
	}
	return nil
}

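// setNodeAnnotationsLabelsTaints fetches the node object for a host, applies
// the address annotations and the label and taint changes, and updates the
// node, retrying a few times on lookup or update failures.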
func setNodeAnnotationsLabelsTaints(k8sClient *kubernetes.Clientset, host *hosts.Host) error {
	node := &v1.Node{}
	var err error
	for retries := 0; retries <= 5; retries++ {
		node, err = k8s.GetNode(k8sClient, host.HostnameOverride)
		if err != nil {
			logrus.Debugf("[hosts] Can't find node by name [%s], retrying..", host.HostnameOverride)
			time.Sleep(2 * time.Second)
			continue
		}

		oldNode := node.DeepCopy()
		k8s.SetNodeAddressesAnnotations(node, host.InternalAddress, host.Address)
		k8s.SyncNodeLabels(node, host.ToAddLabels, host.ToDelLabels)
		k8s.SyncNodeTaints(node, host.ToAddTaints, host.ToDelTaints)

		if reflect.DeepEqual(oldNode, node) {
			logrus.Debugf("skipping syncing labels for node [%s]", node.Name)
			return nil
		}
		_, err = k8sClient.CoreV1().Nodes().Update(node)
		if err != nil {
			logrus.Debugf("Error syncing labels for node [%s]: %v", node.Name, err)
			time.Sleep(5 * time.Second)
			continue
		}
		return nil
	}
	return err
}

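// PrePullK8sImages pulls the Kubernetes system image on every unique host in
// parallel, using up to WorkerThreads workers.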
func (c *Cluster) PrePullK8sImages(ctx context.Context) error {
	log.Infof(ctx, "Pre-pulling kubernetes images")
	var errgrp errgroup.Group
	hostList := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	hostsQueue := util.GetObjectQueue(hostList)
	for w := 0; w < WorkerThreads; w++ {
		errgrp.Go(func() error {
			var errList []error
			for host := range hostsQueue {
				runHost := host.(*hosts.Host)
				err := docker.UseLocalOrPull(ctx, runHost.DClient, runHost.Address, c.SystemImages.Kubernetes, "pre-deploy", c.PrivateRegistriesMap)
				if err != nil {
					errList = append(errList, err)
				}
			}
			return util.ErrList(errList)
		})
	}

	if err := errgrp.Wait(); err != nil {
		return err
	}
	log.Infof(ctx, "Kubernetes images pulled successfully")
	return nil
}

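// ConfigureCluster deploys the network plugin and the cluster addons using
// the supplied certificate bundle; like ApplyAuthzResources it runs purely
// through k8s jobs rather than Docker.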
func ConfigureCluster(
	ctx context.Context,
	rkeConfig v3.RancherKubernetesEngineConfig,
	crtBundle map[string]pki.CertificatePKI,
	flags ExternalFlags,
	dialersOptions hosts.DialersOptions,
	data map[string]interface{},
	useKubectl bool) error {
	// Dialer factories are not needed here, since we are not using Docker, only k8s jobs
	kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "")
	if err != nil {
		return err
	}
	if err := kubeCluster.SetupDialers(ctx, dialersOptions); err != nil {
		return err
	}
	kubeCluster.UseKubectlDeploy = useKubectl
	if len(kubeCluster.ControlPlaneHosts) > 0 {
		kubeCluster.Certificates = crtBundle
		if err := kubeCluster.deployNetworkPlugin(ctx, data); err != nil {
			if err, ok := err.(*addonError); ok && err.isCritical {
				return err
			}
			log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", NetworkPluginResourceName, err)
		}
		if err := kubeCluster.deployAddons(ctx, data); err != nil {
			return err
		}
	}
	return nil
}

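// RestartClusterPods deletes the network, ingress, DNS, and monitoring pods
// created by RKE so their controllers recreate them.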
func RestartClusterPods(ctx context.Context, kubeCluster *Cluster) error {
	log.Infof(ctx, "Restarting network, ingress, and metrics pods")
	// This will remove the pods created by RKE and let the controllers create them again
	kubeClient, err := k8s.NewClient(kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport)
	if err != nil {
		return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
	}
	labelsList := []string{
		fmt.Sprintf("%s=%s", KubeAppLabel, FlannelNetworkPlugin),
		fmt.Sprintf("%s=%s", KubeAppLabel, CanalNetworkPlugin),
		fmt.Sprintf("%s=%s", NameLabel, WeaveNetworkAppName),
		fmt.Sprintf("%s=%s", AppLabel, NginxIngressAddonAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, DefaultMonitoringProvider),
		fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAddonAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAutoscalerAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, CoreDNSAutoscalerAppName),
		fmt.Sprintf("%s=%s", AppLabel, KubeAPIAuthAppName),
		fmt.Sprintf("%s=%s", AppLabel, CattleClusterAgentAppName),
	}
	for _, calicoLabel := range CalicoNetworkLabels {
		labelsList = append(labelsList, fmt.Sprintf("%s=%s", KubeAppLabel, calicoLabel))
	}
	var errgrp errgroup.Group
	labelQueue := util.GetObjectQueue(labelsList)
	for w := 0; w < services.WorkerThreads; w++ {
		errgrp.Go(func() error {
			var errList []error
			for label := range labelQueue {
				runLabel := label.(string)
				// list pods to be deleted
				pods, err := k8s.ListPodsByLabel(kubeClient, runLabel)
				if err != nil {
					errList = append(errList, err)
				}
				// delete pods
				err = k8s.DeletePods(kubeClient, pods)
				if err != nil {
					errList = append(errList, err)
				}
			}
			return util.ErrList(errList)
		})
	}
	if err := errgrp.Wait(); err != nil {
		return err
	}
	return nil
}

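// GetHostInfoMap returns the Docker info reported by each unique host in the
// cluster, keyed by host address.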
func (c *Cluster) GetHostInfoMap() map[string]types.Info {
	hostsInfoMap := make(map[string]types.Info)
	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	for _, host := range allHosts {
		hostsInfoMap[host.Address] = host.DockerInfo
	}
	return hostsInfoMap
}

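// IsLegacyKubeAPI reports whether any controlplane host still runs a
// kube-apiserver whose service-account-key-file argument points at the
// kube-apiserver key, which marks a legacy cluster during certificate
// rotation.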
func IsLegacyKubeAPI(ctx context.Context, kubeCluster *Cluster) (bool, error) {
	log.Infof(ctx, "[controlplane] Check if rotating a legacy cluster")
	for _, host := range kubeCluster.ControlPlaneHosts {
		kubeAPIInspect, err := docker.InspectContainer(ctx, host.DClient, host.Address, services.KubeAPIContainerName)
		if err != nil {
			return false, err
		}
		for _, arg := range kubeAPIInspect.Args {
			if strings.Contains(arg, serviceAccountTokenFileParam) &&
				strings.Contains(arg, pki.GetKeyPath(pki.KubeAPICertName)) {
				return true, nil
			}
		}
	}
	return false, nil
}