diff --git a/cluster/certificates.go b/cluster/certificates.go
index e5cfdbca..a330ff28 100644
--- a/cluster/certificates.go
+++ b/cluster/certificates.go
@@ -50,7 +50,7 @@ func GetClusterCertsFromKubernetes(ctx context.Context, kubeCluster *Cluster) (m
 	certMap := make(map[string]pki.CertificatePKI)
 	for _, certName := range certificatesNames {
-		secret, err := k8s.GetSecret(k8sClient, certName)
+		secret, err := k8s.GetSystemSecret(k8sClient, certName)
 		if err != nil && !strings.HasPrefix(certName, "kube-etcd") &&
 			!strings.Contains(certName, pki.RequestHeaderCACertName) &&
 			!strings.Contains(certName, pki.APIProxyClientCertName) &&
diff --git a/cluster/cluster.go b/cluster/cluster.go
index 3f751f7c..62164649 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -58,6 +58,13 @@ type Cluster struct {
 	UseKubectlDeploy bool
 	v3.RancherKubernetesEngineConfig `yaml:",inline"`
 	WorkerHosts []*hosts.Host
+	EncryptionConfig encryptionConfig
+}
+
+type encryptionConfig struct {
+	RewriteSecrets         bool
+	RotateKey              bool
+	EncryptionProviderFile string
 }
 
 const (
@@ -147,14 +154,28 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[strin
 func ParseConfig(clusterFile string) (*v3.RancherKubernetesEngineConfig, error) {
 	logrus.Debugf("Parsing cluster file [%v]", clusterFile)
 	var rkeConfig v3.RancherKubernetesEngineConfig
+
+	// The customConfig is mapped to a k8s type, which doesn't unmarshal well because it has a
+	// nested struct and no yaml tags. Therefore, we have to re-parse it and assign it correctly.
+	// This only affects the RKE CLI, since rkeConfig is passed directly from Rancher in the Rancher use case.
+	clusterFile, secretConfig, err := resolvCustomEncryptionConfig(clusterFile)
+	if err != nil {
+		return nil, err
+	}
+
 	if err := yaml.Unmarshal([]byte(clusterFile), &rkeConfig); err != nil {
 		return nil, err
 	}
+
+	if isEncryptionEnabled(&rkeConfig) && secretConfig != nil {
+		rkeConfig.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig = secretConfig
+	}
 	return &rkeConfig, nil
 }
 
-func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, flags ExternalFlags) (*Cluster, error) {
+func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, flags ExternalFlags, encryptConfig string) (*Cluster, error) {
 	// basic cluster object from rkeConfig
+	var err error
 	c := &Cluster{
 		AuthnStrategies:               make(map[string]bool),
 		RancherKubernetesEngineConfig: *rkeConfig,
@@ -164,6 +185,9 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
 		CertificateDir:                flags.CertificateDir,
 		StateFilePath:                 GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir),
 		PrivateRegistriesMap:          make(map[string]v3.PrivateRegistry),
+		EncryptionConfig: encryptionConfig{
+			EncryptionProviderFile: encryptConfig,
+		},
 	}
 	if metadata.K8sVersionToRKESystemImages == nil {
 		metadata.InitMetadata(ctx)
@@ -177,9 +201,19 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
 	if len(c.CertificateDir) == 0 {
 		c.CertificateDir = GetCertificateDirPath(c.ConfigPath, c.ConfigDir)
 	}
+	// We don't manage custom configuration; if it's there, we just use it.
+ if isEncryptionCustomConfig(rkeConfig) { + if c.EncryptionConfig.EncryptionProviderFile, err = c.readEncryptionCustomConfig(); err != nil { + return nil, err + } + } else if isEncryptionEnabled(rkeConfig) && c.EncryptionConfig.EncryptionProviderFile == "" { + if c.EncryptionConfig.EncryptionProviderFile, err = c.getEncryptionProviderFile(); err != nil { + return nil, err + } + } // Setting cluster Defaults - err := c.setClusterDefaults(ctx, flags) + err = c.setClusterDefaults(ctx, flags) if err != nil { return nil, err } @@ -294,7 +328,7 @@ func getLocalAdminConfigWithNewAddress(localConfigPath, cpAddress string, cluste func ApplyAuthzResources(ctx context.Context, rkeConfig v3.RancherKubernetesEngineConfig, flags ExternalFlags, dailersOptions hosts.DialersOptions) error { // dialer factories are not needed here since we are not uses docker only k8s jobs - kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags) + kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "") if err != nil { return err } @@ -466,7 +500,7 @@ func ConfigureCluster( data map[string]interface{}, useKubectl bool) error { // dialer factories are not needed here since we are not uses docker only k8s jobs - kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags) + kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "") if err != nil { return err } diff --git a/cluster/encryption.go b/cluster/encryption.go new file mode 100644 index 00000000..2aa82e4e --- /dev/null +++ b/cluster/encryption.go @@ -0,0 +1,392 @@ +package cluster + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + + ghodssyaml "github.com/ghodss/yaml" + normantypes "github.com/rancher/norman/types" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + sigsyaml "sigs.k8s.io/yaml" + + "github.com/rancher/rke/k8s" + "github.com/rancher/rke/log" + "github.com/rancher/rke/services" + "github.com/rancher/rke/templates" + "github.com/rancher/rke/util" + "github.com/rancher/types/apis/management.cattle.io/v3" + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiserverconfig "k8s.io/apiserver/pkg/apis/config" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + EncryptionProviderFilePath = "/etc/kubernetes/ssl/encryption.yaml" +) + +type encryptionKey struct { + Name string + Secret string +} +type keyList struct { + KeyList []*encryptionKey +} + +func ReconcileEncryptionProviderConfig(ctx context.Context, kubeCluster, currentCluster *Cluster) error { + log.Infof(ctx, "[%s] Reconciling cluster's encryption provider configuration..", services.ControlRole) + if len(kubeCluster.ControlPlaneHosts) == 0 { + return nil + } + // New or existing cluster deployment with encryption enabled. We will rewrite the secrets after deploying the addons. 
+ if (currentCluster == nil || !currentCluster.IsEncryptionEnabled()) && + kubeCluster.IsEncryptionEnabled() { + kubeCluster.EncryptionConfig.RewriteSecrets = true + return nil + } + // encryption is disabled + if !kubeCluster.IsEncryptionEnabled() && !currentCluster.IsEncryptionEnabled() { + return nil + } + // disable encryption + if !kubeCluster.IsEncryptionEnabled() && currentCluster.IsEncryptionEnabled() { + if currentCluster.IsEncryptionCustomConfig() { + // KubeAPI will be restarted for the last time during controlplane redeployment, since the + // Configuration file is now empty, the Process Plan will change. + kubeCluster.EncryptionConfig.EncryptionProviderFile = "" + return kubeCluster.DeployEncryptionProviderFile(ctx) + } + return kubeCluster.DisableSecretsEncryption(ctx, currentCluster) + } + return nil +} + +func (c *Cluster) DisableSecretsEncryption(ctx context.Context, currentCluster *Cluster) error { + log.Infof(ctx, "[%s] Disabling Secrets Encryption..", services.ControlRole) + + if len(c.ControlPlaneHosts) == 0 { + return nil + } + var err error + c.EncryptionConfig.EncryptionProviderFile, err = currentCluster.generateDisabledEncryptionProviderFile() + if err != nil { + return err + } + logrus.Debugf("[%s] Deploying Identity first Encryption Provider Configuration", services.ControlRole) + if err := c.DeployEncryptionProviderFile(ctx); err != nil { + return err + } + if err := services.RestartKubeAPIWithHealthcheck(ctx, c.ControlPlaneHosts, c.LocalConnDialerFactory, c.Certificates); err != nil { + return err + } + if err := c.RewriteSecrets(ctx); err != nil { + return err + } + // KubeAPI will be restarted for the last time during controlplane redeployment, since the + // Configuration file is now empty, the Process Plan will change. + c.EncryptionConfig.EncryptionProviderFile = "" + if err := c.DeployEncryptionProviderFile(ctx); err != nil { + return err + } + log.Infof(ctx, "[%s] Secrets Encryption disabled successfully", services.ControlRole) + return nil +} + +func (c *Cluster) RewriteSecrets(ctx context.Context) error { + log.Infof(ctx, "Rewriting cluster secrets") + var errgrp errgroup.Group + k8sClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport) + if err != nil { + return fmt.Errorf("Failed to initialize new kubernetes client: %v", err) + } + secretsList, err := k8s.GetSecretsList(k8sClient, "") + if err != nil { + return err + } + + secretsQueue := util.GetObjectQueue(secretsList.Items) + for w := 0; w < SyncWorkers; w++ { + errgrp.Go(func() error { + var errList []error + for secret := range secretsQueue { + s := secret.(v1.Secret) + err := rewriteSecret(k8sClient, &s) + if err != nil { + errList = append(errList, err) + } + } + return util.ErrList(errList) + }) + } + if err := errgrp.Wait(); err != nil { + return err + } + log.Infof(ctx, "Cluster secrets rewritten successfully") + return nil +} + +func (c *Cluster) RotateEncryptionKey(ctx context.Context, fullState *FullState) error { + //generate new key + newKey, err := generateEncryptionKey() + if err != nil { + return err + } + oldKey, err := c.extractActiveKey(c.EncryptionConfig.EncryptionProviderFile) + if err != nil { + return err + } + // reverse the keys order in the file, making newKey the Active Key + initialKeyList := []*encryptionKey{ // order is critical here! 
+ newKey, + oldKey, + } + initialProviderConfig, err := providerFileFromKeyList(keyList{KeyList: initialKeyList}) + if err != nil { + return err + } + c.EncryptionConfig.EncryptionProviderFile = initialProviderConfig + if err := c.DeployEncryptionProviderFile(ctx); err != nil { + return err + } + // commit to state as soon as possible + logrus.Debugf("[%s] Updating cluster state", services.ControlRole) + if err := c.UpdateClusterCurrentState(ctx, fullState); err != nil { + return err + } + if err := services.RestartKubeAPIWithHealthcheck(ctx, c.ControlPlaneHosts, c.LocalConnDialerFactory, c.Certificates); err != nil { + return err + } + // rewrite secrets + if err := c.RewriteSecrets(ctx); err != nil { + return err + } + // At this point, all secrets have been rewritten using the newKey, so we remove the old one. + finalKeyList := []*encryptionKey{ + newKey, + } + finalProviderConfig, err := providerFileFromKeyList(keyList{KeyList: finalKeyList}) + if err != nil { + return err + } + c.EncryptionConfig.EncryptionProviderFile = finalProviderConfig + if err := c.DeployEncryptionProviderFile(ctx); err != nil { + return err + } + // commit to state + logrus.Debugf("[%s] Updating cluster state", services.ControlRole) + if err := c.UpdateClusterCurrentState(ctx, fullState); err != nil { + return err + } + if err := services.RestartKubeAPIWithHealthcheck(ctx, c.ControlPlaneHosts, c.LocalConnDialerFactory, c.Certificates); err != nil { + return err + } + return nil +} + +func (c *Cluster) DeployEncryptionProviderFile(ctx context.Context) error { + logrus.Debugf("[%s] Deploying Encryption Provider Configuration file on Control Plane nodes..", services.ControlRole) + return deployFile(ctx, c.ControlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, EncryptionProviderFilePath, c.EncryptionConfig.EncryptionProviderFile) +} + +// ReconcileDesiredStateEncryptionConfig We do the rotation outside of the cluster reconcile logic. 
When we are done, +// DesiredState needs to be updated to reflect the "new" configuration +func (c *Cluster) ReconcileDesiredStateEncryptionConfig(ctx context.Context, fullState *FullState) error { + fullState.DesiredState.EncryptionConfig = c.EncryptionConfig.EncryptionProviderFile + return fullState.WriteStateFile(ctx, c.StateFilePath) +} + +func (c *Cluster) IsEncryptionEnabled() bool { + if c == nil { + return false + } + if c.Services.KubeAPI.SecretsEncryptionConfig != nil && + c.Services.KubeAPI.SecretsEncryptionConfig.Enabled { + return true + } + return false +} + +func (c *Cluster) IsEncryptionCustomConfig() bool { + if c.IsEncryptionEnabled() && + c.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig != nil { + return true + } + return false +} + +func (c *Cluster) getEncryptionProviderFile() (string, error) { + if c.EncryptionConfig.EncryptionProviderFile != "" { + return c.EncryptionConfig.EncryptionProviderFile, nil + } + key, err := generateEncryptionKey() + if err != nil { + return "", err + } + c.EncryptionConfig.EncryptionProviderFile, err = providerFileFromKeyList(keyList{KeyList: []*encryptionKey{key}}) + return c.EncryptionConfig.EncryptionProviderFile, err +} + +func (c *Cluster) extractActiveKey(s string) (*encryptionKey, error) { + config := apiserverconfig.EncryptionConfiguration{} + if err := k8s.DecodeYamlResource(&config, c.EncryptionConfig.EncryptionProviderFile); err != nil { + return nil, err + } + resource := config.Resources[0] + provider := resource.Providers[0] + return &encryptionKey{ + Name: provider.AESCBC.Keys[0].Name, + Secret: provider.AESCBC.Keys[0].Secret, + }, nil +} + +func (c *Cluster) generateDisabledEncryptionProviderFile() (string, error) { + key, err := c.extractActiveKey(c.EncryptionConfig.EncryptionProviderFile) + if err != nil { + return "", err + } + return disabledProviderFileFromKey(key) +} + +func rewriteSecret(k8sClient *kubernetes.Clientset, secret *v1.Secret) error { + var err error + if err = k8s.UpdateSecret(k8sClient, secret); err == nil { + return nil + } + if apierrors.IsConflict(err) { + secret, err = k8s.GetSecret(k8sClient, secret.Name, secret.Namespace) + if err != nil { + return err + } + err = k8s.UpdateSecret(k8sClient, secret) + } + return err +} + +func generateEncryptionKey() (*encryptionKey, error) { + // TODO: do this in a better way + buf := make([]byte, 16) + if _, err := rand.Read(buf); err != nil { + return nil, err + } + return &encryptionKey{ + Name: normantypes.GenerateName("key"), + Secret: base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%X", buf))), + }, nil +} + +func isEncryptionEnabled(rkeConfig *v3.RancherKubernetesEngineConfig) bool { + if rkeConfig.Services.KubeAPI.SecretsEncryptionConfig != nil && + rkeConfig.Services.KubeAPI.SecretsEncryptionConfig.Enabled { + return true + } + return false +} +func isEncryptionCustomConfig(rkeConfig *v3.RancherKubernetesEngineConfig) bool { + if isEncryptionEnabled(rkeConfig) && + rkeConfig.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig != nil { + return true + } + return false +} + +func providerFileFromKeyList(keyList interface{}) (string, error) { + return templates.CompileTemplateFromMap(templates.MultiKeyEncryptionProviderFile, keyList) +} + +func disabledProviderFileFromKey(keyList interface{}) (string, error) { + return templates.CompileTemplateFromMap(templates.DisabledEncryptionProviderFile, keyList) +} + +func (c *Cluster) readEncryptionCustomConfig() (string, error) { + // directly marshalling apiserverconfig.EncryptionConfiguration to 
yaml breaks things because TypeMeta + // is nested and all fields don't have tags. apiserverconfigv1 has json tags only. So we do this as a work around. + + out := apiserverconfigv1.EncryptionConfiguration{} + err := apiserverconfigv1.Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration( + c.RancherKubernetesEngineConfig.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig, &out, nil) + if err != nil { + return "", err + } + jsonConfig, err := json.Marshal(out) + if err != nil { + return "", err + } + yamlConfig, err := sigsyaml.JSONToYAML(jsonConfig) + if err != nil { + return "", nil + } + + return templates.CompileTemplateFromMap(templates.CustomEncryptionProviderFile, + struct{ CustomConfig string }{CustomConfig: string(yamlConfig)}) +} + +func resolvCustomEncryptionConfig(clusterFile string) (string, *apiserverconfig.EncryptionConfiguration, error) { + var err error + var r map[string]interface{} + err = ghodssyaml.Unmarshal([]byte(clusterFile), &r) + if err != nil { + return clusterFile, nil, fmt.Errorf("error unmarshalling: %v", err) + } + if err != nil { + return "", nil, fmt.Errorf("error unmarshalling encryption custom config: %v", err) + } + services, ok := r["services"].(map[string]interface{}) + if services == nil || !ok { + return clusterFile, nil, nil + } + kubeapi, ok := services["kube-api"].(map[string]interface{}) + if kubeapi == nil || !ok { + return clusterFile, nil, nil + } + sec, ok := kubeapi["secrets_encryption_config"].(map[string]interface{}) + if sec == nil || !ok { + return clusterFile, nil, nil + } + customConfig, ok := sec["custom_config"].(map[string]interface{}) + + if ok && customConfig != nil { + delete(sec, "custom_config") + newClusterFile, err := ghodssyaml.Marshal(r) + c, err := parseCustomConfig(customConfig) + return string(newClusterFile), c, err + } + return clusterFile, nil, nil +} + +func parseCustomConfig(customConfig map[string]interface{}) (*apiserverconfig.EncryptionConfiguration, error) { + var err error + + data, err := json.Marshal(customConfig) + if err != nil { + return nil, fmt.Errorf("error marshalling: %v", err) + } + scheme := runtime.NewScheme() + err = apiserverconfig.AddToScheme(scheme) + if err != nil { + return nil, fmt.Errorf("error adding to scheme: %v", err) + } + err = apiserverconfigv1.AddToScheme(scheme) + if err != nil { + return nil, fmt.Errorf("error adding to scheme: %v", err) + } + codecs := serializer.NewCodecFactory(scheme) + decoder := codecs.UniversalDecoder() + decodedObj, objType, err := decoder.Decode(data, nil, nil) + + if err != nil { + return nil, fmt.Errorf("error decoding data: %v", err) + } + + decodedConfig, ok := decodedObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, fmt.Errorf("unexpected type: %T", objType) + } + return decodedConfig, nil +} diff --git a/cluster/file-deployer.go b/cluster/file-deployer.go index 9029ca5b..9ff9fb7b 100644 --- a/cluster/file-deployer.go +++ b/cluster/file-deployer.go @@ -34,15 +34,27 @@ func doDeployFile(ctx context.Context, host *hosts.Host, fileName, fileContents, if err := docker.DoRemoveContainer(ctx, host.DClient, ContainerName, host.Address); err != nil { return err } - containerEnv := []string{ConfigEnv + "=" + fileContents} - imageCfg := &container.Config{ - Image: alpineImage, - Cmd: []string{ + var cmd, containerEnv []string + + if fileContents != "" { + containerEnv = []string{ConfigEnv + "=" + fileContents} + cmd = []string{ "sh", "-c", fmt.Sprintf("t=$(mktemp); echo -e \"$%s\" > $t && mv $t %s && chmod 600 %s", 
ConfigEnv, fileName, fileName), - }, - Env: containerEnv, + } + } else { + cmd = []string{ + "sh", + "-c", + fmt.Sprintf("rm -f %s", fileName), + } + } + + imageCfg := &container.Config{ + Image: alpineImage, + Cmd: cmd, + Env: containerEnv, } hostCfg := &container.HostConfig{ Binds: []string{ diff --git a/cluster/hosts.go b/cluster/hosts.go index 5036388b..9e434f87 100644 --- a/cluster/hosts.go +++ b/cluster/hosts.go @@ -157,6 +157,11 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error { } log.Infof(ctx, "[%s] Successfully deployed authentication webhook config Cluster nodes", authnWebhookFileName) } + if c.EncryptionConfig.EncryptionProviderFile != "" { + if err := c.DeployEncryptionProviderFile(ctx); err != nil { + return err + } + } } return nil } diff --git a/cluster/plan.go b/cluster/plan.go index e9664f88..f8872166 100644 --- a/cluster/plan.go +++ b/cluster/plan.go @@ -69,7 +69,7 @@ func GetServiceOptionData(data map[string]interface{}) map[string]*v3.Kubernetes func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, hostsInfoMap map[string]types.Info, data map[string]interface{}) (v3.RKEPlan, error) { clusterPlan := v3.RKEPlan{} - myCluster, err := InitClusterObject(ctx, rkeConfig, ExternalFlags{}) + myCluster, err := InitClusterObject(ctx, rkeConfig, ExternalFlags{}, "") if err != nil { return clusterPlan, err } @@ -109,15 +109,23 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts portChecks = append(portChecks, BuildPortChecksFromPortList(host, EtcdPortList, ProtocolTCP)...) } - cloudConfig := v3.File{ - Name: cloudConfigFileName, - Contents: b64.StdEncoding.EncodeToString([]byte(myCluster.CloudConfigFile)), + files := []v3.File{ + v3.File{ + Name: cloudConfigFileName, + Contents: b64.StdEncoding.EncodeToString([]byte(myCluster.CloudConfigFile)), + }, + } + if myCluster.IsEncryptionEnabled() { + files = append(files, v3.File{ + Name: EncryptionProviderFilePath, + Contents: b64.StdEncoding.EncodeToString([]byte(myCluster.EncryptionConfig.EncryptionProviderFile)), + }) } return v3.RKEConfigNodePlan{ Address: host.Address, Processes: osLimitationFilter(hostDockerInfo.OSType, processes), PortChecks: portChecks, - Files: []v3.File{cloudConfig}, + Files: files, Annotations: map[string]string{ k8s.ExternalAddressAnnotation: host.Address, k8s.InternalAddressAnnotation: host.InternalAddress, @@ -210,6 +218,10 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp c.Services.KubeAPI.ExtraEnv, fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile))) } + if c.EncryptionConfig.EncryptionProviderFile != "" { + CommandArgs["experimental-encryption-provider-config"] = EncryptionProviderFilePath + } + serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData) if serviceOptions.KubeAPI != nil { for k, v := range serviceOptions.KubeAPI { diff --git a/cluster/reconcile.go b/cluster/reconcile.go index 30f598c3..b8ea60b1 100644 --- a/cluster/reconcile.go +++ b/cluster/reconcile.go @@ -66,7 +66,6 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, return err } } - log.Infof(ctx, "[reconcile] Reconciled cluster state successfully") return nil } diff --git a/cluster/state.go b/cluster/state.go index 4dfcdc4d..dfa9e14d 100644 --- a/cluster/state.go +++ b/cluster/state.go @@ -36,11 +36,13 @@ type FullState struct { type State struct { RancherKubernetesEngineConfig *v3.RancherKubernetesEngineConfig 
`json:"rkeConfig,omitempty"` CertificatesBundle map[string]pki.CertificatePKI `json:"certificatesBundle,omitempty"` + EncryptionConfig string `json:"encryptionConfig,omitempty"` } func (c *Cluster) UpdateClusterCurrentState(ctx context.Context, fullState *FullState) error { fullState.CurrentState.RancherKubernetesEngineConfig = c.RancherKubernetesEngineConfig.DeepCopy() fullState.CurrentState.CertificatesBundle = c.Certificates + fullState.CurrentState.EncryptionConfig = c.EncryptionConfig.EncryptionProviderFile return fullState.WriteStateFile(ctx, c.StateFilePath) } @@ -52,12 +54,12 @@ func (c *Cluster) GetClusterState(ctx context.Context, fullState *FullState) (*C // resetup external flags flags := GetExternalFlags(false, false, false, c.ConfigDir, c.ConfigPath) - currentCluster, err := InitClusterObject(ctx, fullState.CurrentState.RancherKubernetesEngineConfig, flags) + currentCluster, err := InitClusterObject(ctx, fullState.CurrentState.RancherKubernetesEngineConfig, flags, fullState.CurrentState.EncryptionConfig) if err != nil { return nil, err } currentCluster.Certificates = fullState.CurrentState.CertificatesBundle - + currentCluster.EncryptionConfig.EncryptionProviderFile = fullState.CurrentState.EncryptionConfig // resetup dialers dialerOptions := hosts.GetDialerOptions(c.DockerDialerFactory, c.LocalConnDialerFactory, c.K8sWrapTransport) if err := currentCluster.SetupDialers(ctx, dialerOptions); err != nil { @@ -147,7 +149,8 @@ func GetK8sVersion(localConfigPath string, k8sWrapTransport transport.WrapperFun return fmt.Sprintf("%#v", *serverVersion), nil } -func RebuildState(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, oldState *FullState, flags ExternalFlags) (*FullState, error) { +func RebuildState(ctx context.Context, kubeCluster *Cluster, oldState *FullState, flags ExternalFlags) (*FullState, error) { + rkeConfig := &kubeCluster.RancherKubernetesEngineConfig newState := &FullState{ DesiredState: State{ RancherKubernetesEngineConfig: rkeConfig.DeepCopy(), @@ -169,25 +172,14 @@ func RebuildState(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConf } // Rebuilding the certificates of the desired state - if oldState.DesiredState.CertificatesBundle == nil { - // Get the certificate Bundle - certBundle, err := pki.GenerateRKECerts(ctx, *rkeConfig, "", "") - if err != nil { - return nil, fmt.Errorf("Failed to generate certificate bundle: %v", err) - } - newState.DesiredState.CertificatesBundle = certBundle - } else { - pkiCertBundle := oldState.DesiredState.CertificatesBundle - // check for legacy clusters prior to requestheaderca - if pkiCertBundle[pki.RequestHeaderCACertName].Certificate == nil { - if err := pki.GenerateRKERequestHeaderCACert(ctx, pkiCertBundle, flags.ClusterFilePath, flags.ConfigDir); err != nil { - return nil, err - } - } - if err := pki.GenerateRKEServicesCerts(ctx, pkiCertBundle, *rkeConfig, flags.ClusterFilePath, flags.ConfigDir, false); err != nil { + if oldState.DesiredState.CertificatesBundle == nil { // this is a fresh cluster + if err := buildFreshState(ctx, kubeCluster, newState); err != nil { + return nil, err + } + } else { // This is an existing cluster with an old DesiredState + if err := rebuildExistingState(ctx, kubeCluster, oldState, newState, flags); err != nil { return nil, err } - newState.DesiredState.CertificatesBundle = pkiCertBundle } newState.CurrentState = oldState.CurrentState return newState, nil @@ -292,3 +284,46 @@ func GetStateFromNodes(ctx context.Context, kubeCluster *Cluster) *Cluster { log.Infof(ctx, 
"[state] Successfully fetched cluster state from Nodes") return ¤tCluster } + +func buildFreshState(ctx context.Context, kubeCluster *Cluster, newState *FullState) error { + rkeConfig := &kubeCluster.RancherKubernetesEngineConfig + // Get the certificate Bundle + certBundle, err := pki.GenerateRKECerts(ctx, *rkeConfig, "", "") + if err != nil { + return fmt.Errorf("Failed to generate certificate bundle: %v", err) + } + newState.DesiredState.CertificatesBundle = certBundle + if isEncryptionEnabled(rkeConfig) { + if newState.DesiredState.EncryptionConfig, err = kubeCluster.getEncryptionProviderFile(); err != nil { + return err + } + } + return nil +} + +func rebuildExistingState(ctx context.Context, kubeCluster *Cluster, oldState, newState *FullState, flags ExternalFlags) error { + rkeConfig := &kubeCluster.RancherKubernetesEngineConfig + pkiCertBundle := oldState.DesiredState.CertificatesBundle + // check for legacy clusters prior to requestheaderca + if pkiCertBundle[pki.RequestHeaderCACertName].Certificate == nil { + if err := pki.GenerateRKERequestHeaderCACert(ctx, pkiCertBundle, flags.ClusterFilePath, flags.ConfigDir); err != nil { + return err + } + } + if err := pki.GenerateRKEServicesCerts(ctx, pkiCertBundle, *rkeConfig, flags.ClusterFilePath, flags.ConfigDir, false); err != nil { + return err + } + newState.DesiredState.CertificatesBundle = pkiCertBundle + if isEncryptionEnabled(rkeConfig) { + if oldState.DesiredState.EncryptionConfig != "" { + newState.DesiredState.EncryptionConfig = oldState.DesiredState.EncryptionConfig + } else { + var err error + if newState.DesiredState.EncryptionConfig, err = kubeCluster.getEncryptionProviderFile(); err != nil { + return err + } + } + } + + return nil +} diff --git a/cmd/cert.go b/cmd/cert.go index 276d693a..fb3b06b9 100644 --- a/cmd/cert.go +++ b/cmd/cert.go @@ -136,7 +136,7 @@ func rebuildClusterWithRotatedCertificates(ctx context.Context, return APIURL, caCrt, clientCert, clientKey, nil, err } - kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags) + kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags, clusterState.DesiredState.EncryptionConfig) if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } @@ -231,7 +231,7 @@ func GenerateRKECSRs(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineC } // initialze the cluster object from the config file - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, "") if err != nil { return err } diff --git a/cmd/common.go b/cmd/common.go index 820490aa..6711ad7b 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -74,10 +74,8 @@ func ClusterInit(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfi if len(flags.CertificateDir) == 0 { flags.CertificateDir = cluster.GetCertificateDirPath(flags.ClusterFilePath, flags.ConfigDir) } - rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath) - - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, rkeFullState.DesiredState.EncryptionConfig) if err != nil { return err } @@ -93,16 +91,21 @@ func ClusterInit(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfi } log.Warnf(ctx, "[state] can't fetch legacy cluster state from Kubernetes: %v", err) } + // check if certificate rotate or normal init if 
kubeCluster.RancherKubernetesEngineConfig.RotateCertificates != nil { fullState, err = rotateRKECertificates(ctx, kubeCluster, flags, rkeFullState) } else { - fullState, err = cluster.RebuildState(ctx, &kubeCluster.RancherKubernetesEngineConfig, rkeFullState, flags) + fullState, err = cluster.RebuildState(ctx, kubeCluster, rkeFullState, flags) } if err != nil { return err } + if fullState.DesiredState.EncryptionConfig != "" { + kubeCluster.EncryptionConfig.EncryptionProviderFile = fullState.DesiredState.EncryptionConfig + } + rkeState := cluster.FullState{ DesiredState: fullState.DesiredState, CurrentState: fullState.CurrentState, diff --git a/cmd/encryption.go b/cmd/encryption.go new file mode 100644 index 00000000..e07d4d0a --- /dev/null +++ b/cmd/encryption.go @@ -0,0 +1,107 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/rancher/rke/cluster" + "github.com/rancher/rke/hosts" + "github.com/rancher/rke/log" + "github.com/rancher/rke/pki" + "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +func EncryptionCommand() cli.Command { + encryptFlags := []cli.Flag{ + cli.StringFlag{ + Name: "config", + Usage: "Specify an alternate cluster YAML file", + Value: pki.ClusterConfig, + EnvVar: "RKE_CONFIG", + }, + } + encryptFlags = append(encryptFlags, commonFlags...) + return cli.Command{ + Name: "encrypt", + Usage: "Manage cluster encryption provider keys", + Subcommands: cli.Commands{ + cli.Command{ + Name: "rotate-key", + Usage: "Rotate cluster encryption provider key", + Action: rotateEncryptionKeyFromCli, + Flags: encryptFlags, + }, + }, + } +} + +func rotateEncryptionKeyFromCli(ctx *cli.Context) error { + logrus.Infof("Running RKE version: %v", ctx.App.Version) + clusterFile, filePath, err := resolveClusterFile(ctx) + if err != nil { + return fmt.Errorf("Failed to resolve cluster file: %v", err) + } + + rkeConfig, err := cluster.ParseConfig(clusterFile) + if err != nil { + return fmt.Errorf("Failed to parse cluster file: %v", err) + } + + rkeConfig, err = setOptionsFromCLI(ctx, rkeConfig) + if err != nil { + return err + } + + // setting up the flags + flags := cluster.GetExternalFlags(false, false, false, "", filePath) + + return RotateEncryptionKey(context.Background(), rkeConfig, hosts.DialersOptions{}, flags) +} + +func RotateEncryptionKey(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, + dialersOptions hosts.DialersOptions, flags cluster.ExternalFlags) error { + log.Infof(ctx, "Rotating cluster secrets encryption key..") + stateFilePath := cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir) + rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath) + // We generate the first encryption config in ClusterInit, to store it ASAP. 
It's written + // to the DesiredState + stateEncryptionConfig := rkeFullState.DesiredState.EncryptionConfig + + // if CurrentState has EncryptionConfig, it means this is NOT the first time we enable encryption, we should use the _latest_ applied value from the current cluster + if rkeFullState.CurrentState.EncryptionConfig != "" { + stateEncryptionConfig = rkeFullState.CurrentState.EncryptionConfig + } + + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, stateEncryptionConfig) + if err != nil { + return err + } + if kubeCluster.IsEncryptionCustomConfig() { + return fmt.Errorf("can't rotate encryption keys: Key Rotation is not supported with custom configuration") + } + if !kubeCluster.IsEncryptionEnabled() { + return fmt.Errorf("can't rotate encryption keys: Encryption Configuration is disabled") + } + kubeCluster.Certificates = rkeFullState.DesiredState.CertificatesBundle + if err := kubeCluster.SetupDialers(ctx, dialersOptions); err != nil { + return err + } + if err := kubeCluster.TunnelHosts(ctx, flags); err != nil { + return err + } + + err = kubeCluster.RotateEncryptionKey(ctx, rkeFullState) + if err != nil { + return err + } + // make sure we have the latest state + rkeFullState, _ = cluster.ReadStateFile(ctx, stateFilePath) + log.Infof(ctx, "Reconciling cluster state") + if err := kubeCluster.ReconcileDesiredStateEncryptionConfig(ctx, rkeFullState); err != nil { + return err + } + log.Infof(ctx, "Cluster secrets encryption key rotated successfully") + return nil +} diff --git a/cmd/etcd.go b/cmd/etcd.go index ffbe3b5c..5e403498 100644 --- a/cmd/etcd.go +++ b/cmd/etcd.go @@ -92,7 +92,7 @@ func SnapshotSaveEtcdHosts( flags cluster.ExternalFlags, snapshotName string) error { log.Infof(ctx, "Starting saving snapshot on etcd hosts") - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, "") if err != nil { return err } @@ -121,12 +121,14 @@ func RestoreEtcdSnapshot( snapshotName string) (string, string, string, string, map[string]pki.CertificatePKI, error) { var APIURL, caCrt, clientCert, clientKey string log.Infof(ctx, "Restoring etcd snapshot %s", snapshotName) - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + + stateFilePath := cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir) + rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath) + + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, rkeFullState.DesiredState.EncryptionConfig) if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } - stateFilePath := cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir) - rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath) if err := checkLegacyCluster(ctx, kubeCluster, rkeFullState, flags); err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } @@ -240,7 +242,7 @@ func SnapshotRemoveFromEtcdHosts( flags cluster.ExternalFlags, snapshotName string) error { log.Infof(ctx, "Starting snapshot remove on etcd hosts") - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags, "") if err != nil { return err } diff --git a/cmd/remove.go b/cmd/remove.go index 208efab1..077ca8d4 100644 --- a/cmd/remove.go +++ b/cmd/remove.go @@ -57,7 +57,7 @@ func ClusterRemove( log.Infof(ctx, "Tearing down Kubernetes cluster") - kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) + kubeCluster, err := 
cluster.InitClusterObject(ctx, rkeConfig, flags, "") if err != nil { return err } diff --git a/cmd/up.go b/cmd/up.go index a63a81ed..6b5e2f7e 100644 --- a/cmd/up.go +++ b/cmd/up.go @@ -84,7 +84,16 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } - kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags) + // We generate the first encryption config in ClusterInit, to store it ASAP. It's written + // to the DesiredState + stateEncryptionConfig := clusterState.DesiredState.EncryptionConfig + + // if CurrentState has EncryptionConfig, it means this is NOT the first time we enable encryption, we should use the _latest_ applied value from the current cluster + if clusterState.CurrentState.EncryptionConfig != "" { + stateEncryptionConfig = clusterState.CurrentState.EncryptionConfig + } + + kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags, stateEncryptionConfig) if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } @@ -136,10 +145,14 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } + // update APIURL after reconcile if len(kubeCluster.ControlPlaneHosts) > 0 { APIURL = fmt.Sprintf("https://%s:6443", kubeCluster.ControlPlaneHosts[0].Address) } + if err = cluster.ReconcileEncryptionProviderConfig(ctx, kubeCluster, currentCluster); err != nil { + return APIURL, caCrt, clientCert, clientKey, nil, err + } if err := kubeCluster.PrePullK8sImages(ctx); err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err @@ -149,7 +162,6 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } - // Apply Authz configuration after deploying controlplane err = cluster.ApplyAuthzResources(ctx, kubeCluster.RancherKubernetesEngineConfig, flags, dialersOptions) if err != nil { @@ -184,6 +196,11 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } + if kubeCluster.EncryptionConfig.RewriteSecrets { + if err = kubeCluster.RewriteSecrets(ctx); err != nil { + return APIURL, caCrt, clientCert, clientKey, nil, err + } + } if err := checkAllIncluded(kubeCluster); err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err @@ -220,6 +237,7 @@ func clusterUpFromCli(ctx *cli.Context) error { } rkeConfig, err := cluster.ParseConfig(clusterFile) + // logrus.Infof("melsayed---------clusterUpFromCli--------- %+v", rkeConfig.Services.KubeAPI.SecretsEncryptionConfig) if err != nil { return fmt.Errorf("Failed to parse cluster file: %v", err) } diff --git a/go.mod b/go.mod index 0b63aca6..62e59f2d 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/docker v0.7.3-0.20190808172531-150530564a14 github.com/docker/go-connections v0.3.0 + github.com/ghodss/yaml v1.0.0 github.com/go-ini/ini v1.37.0 github.com/google/btree v1.0.0 // indirect github.com/gorilla/mux v1.7.3 // indirect diff --git a/k8s/clusterrole.go b/k8s/clusterrole.go index ae231c51..64828a10 100644 --- a/k8s/clusterrole.go +++ b/k8s/clusterrole.go @@ -8,7 +8,7 @@ import ( func 
UpdateClusterRoleBindingFromYaml(k8sClient *kubernetes.Clientset, clusterRoleBindingYaml string) error { clusterRoleBinding := rbacv1.ClusterRoleBinding{} - if err := decodeYamlResource(&clusterRoleBinding, clusterRoleBindingYaml); err != nil { + if err := DecodeYamlResource(&clusterRoleBinding, clusterRoleBindingYaml); err != nil { return err } return retryTo(updateClusterRoleBinding, k8sClient, clusterRoleBinding, DefaultRetries, DefaultSleepSeconds) @@ -29,7 +29,7 @@ func updateClusterRoleBinding(k8sClient *kubernetes.Clientset, crb interface{}) func UpdateClusterRoleFromYaml(k8sClient *kubernetes.Clientset, clusterRoleYaml string) error { clusterRole := rbacv1.ClusterRole{} - if err := decodeYamlResource(&clusterRole, clusterRoleYaml); err != nil { + if err := DecodeYamlResource(&clusterRole, clusterRoleYaml); err != nil { return err } diff --git a/k8s/job.go b/k8s/job.go index 72cbc1dd..53a2e94d 100644 --- a/k8s/job.go +++ b/k8s/job.go @@ -18,7 +18,7 @@ type JobStatus struct { func ApplyK8sSystemJob(jobYaml, kubeConfigPath string, k8sWrapTransport transport.WrapperFunc, timeout int, addonUpdated bool) error { job := v1.Job{} - if err := decodeYamlResource(&job, jobYaml); err != nil { + if err := DecodeYamlResource(&job, jobYaml); err != nil { return err } if job.Namespace == metav1.NamespaceNone { @@ -53,7 +53,7 @@ func ApplyK8sSystemJob(jobYaml, kubeConfigPath string, k8sWrapTransport transpor func DeleteK8sSystemJob(jobYaml string, k8sClient *kubernetes.Clientset, timeout int) error { job := v1.Job{} - if err := decodeYamlResource(&job, jobYaml); err != nil { + if err := DecodeYamlResource(&job, jobYaml); err != nil { return err } if err := deleteK8sJob(k8sClient, job.Name, job.Namespace); err != nil { diff --git a/k8s/k8s.go b/k8s/k8s.go index 7bf7a87c..262aceca 100644 --- a/k8s/k8s.go +++ b/k8s/k8s.go @@ -2,9 +2,10 @@ package k8s import ( "bytes" - "k8s.io/client-go/transport" "time" + "k8s.io/client-go/transport" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -36,7 +37,7 @@ func NewClient(kubeConfigPath string, k8sWrapTransport transport.WrapperFunc) (* return K8sClientSet, nil } -func decodeYamlResource(resource interface{}, yamlManifest string) error { +func DecodeYamlResource(resource interface{}, yamlManifest string) error { decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(yamlManifest))) return decoder.Decode(&resource) } diff --git a/k8s/psp.go b/k8s/psp.go index 5960ad78..c7af3ea9 100644 --- a/k8s/psp.go +++ b/k8s/psp.go @@ -8,7 +8,7 @@ import ( func UpdatePodSecurityPolicyFromYaml(k8sClient *kubernetes.Clientset, pspYaml string) error { psp := v1beta1.PodSecurityPolicy{} - if err := decodeYamlResource(&psp, pspYaml); err != nil { + if err := DecodeYamlResource(&psp, pspYaml); err != nil { return err } return retryTo(updatePodSecurityPolicy, k8sClient, psp, DefaultRetries, DefaultSleepSeconds) diff --git a/k8s/role.go b/k8s/role.go index bccf42ce..bb66b26b 100644 --- a/k8s/role.go +++ b/k8s/role.go @@ -8,7 +8,7 @@ import ( func UpdateRoleBindingFromYaml(k8sClient *kubernetes.Clientset, roleBindingYaml, namespace string) error { roleBinding := rbacv1.RoleBinding{} - if err := decodeYamlResource(&roleBinding, roleBindingYaml); err != nil { + if err := DecodeYamlResource(&roleBinding, roleBindingYaml); err != nil { return err } roleBinding.Namespace = namespace @@ -30,7 +30,7 @@ func updateRoleBinding(k8sClient *kubernetes.Clientset, rb interface{}) error { func UpdateRoleFromYaml(k8sClient 
*kubernetes.Clientset, roleYaml, namespace string) error { role := rbacv1.Role{} - if err := decodeYamlResource(&role, roleYaml); err != nil { + if err := DecodeYamlResource(&role, roleYaml); err != nil { return err } role.Namespace = namespace diff --git a/k8s/secret.go b/k8s/secret.go index baa7b48e..9b0fd8ea 100644 --- a/k8s/secret.go +++ b/k8s/secret.go @@ -2,31 +2,24 @@ package k8s import ( "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) -func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) { - return k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) +func GetSystemSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) { + return GetSecret(k8sClient, secretName, metav1.NamespaceSystem) } -func UpdateSecret(k8sClient *kubernetes.Clientset, secretDataMap map[string][]byte, secretName string) error { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: metav1.NamespaceSystem, - }, - Data: secretDataMap, - } - if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - // update secret if its already exist - if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err != nil { - return err - } - } - return nil +func GetSecret(k8sClient *kubernetes.Clientset, secretName, namespace string) (*v1.Secret, error) { + return k8sClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) +} + +func GetSecretsList(k8sClient *kubernetes.Clientset, namespace string) (*v1.SecretList, error) { + return k8sClient.CoreV1().Secrets("").List(metav1.ListOptions{}) +} + +func UpdateSecret(k8sClient *kubernetes.Clientset, secret *v1.Secret) error { + var err error + _, err = k8sClient.CoreV1().Secrets(secret.Namespace).Update(secret) + return err } diff --git a/k8s/serviceaccount.go b/k8s/serviceaccount.go index 8a433d39..215ffb1a 100644 --- a/k8s/serviceaccount.go +++ b/k8s/serviceaccount.go @@ -10,7 +10,7 @@ import ( func UpdateServiceAccountFromYaml(k8sClient *kubernetes.Clientset, serviceAccountYaml string) error { serviceAccount := v1.ServiceAccount{} - if err := decodeYamlResource(&serviceAccount, serviceAccountYaml); err != nil { + if err := DecodeYamlResource(&serviceAccount, serviceAccountYaml); err != nil { return err } return retryTo(updateServiceAccount, k8sClient, serviceAccount, DefaultRetries, DefaultSleepSeconds) diff --git a/main.go b/main.go index 0d74244e..07371d11 100644 --- a/main.go +++ b/main.go @@ -1,11 +1,12 @@ package main import ( - "github.com/rancher/rke/metadata" "io/ioutil" "os" "regexp" + "github.com/rancher/rke/metadata" + "github.com/mattn/go-colorable" "github.com/rancher/rke/cmd" "github.com/sirupsen/logrus" @@ -52,6 +53,7 @@ func mainErr() error { cmd.ConfigCommand(), cmd.EtcdCommand(), cmd.CertificateCommand(), + cmd.EncryptionCommand(), } app.Flags = []cli.Flag{ cli.BoolFlag{ diff --git a/services/kubeapi.go b/services/kubeapi.go index 45f0044e..914e8315 100644 --- a/services/kubeapi.go +++ b/services/kubeapi.go @@ -5,8 +5,10 @@ import ( "github.com/rancher/rke/docker" "github.com/rancher/rke/hosts" + "github.com/rancher/rke/log" "github.com/rancher/rke/pki" "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/sirupsen/logrus" ) func runKubeAPI(ctx context.Context, host *hosts.Host, df 
hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI) error {
@@ -28,3 +30,21 @@ func removeKubeAPI(ctx context.Context, host *hosts.Host) error {
 func RestartKubeAPI(ctx context.Context, host *hosts.Host) error {
 	return docker.DoRestartContainer(ctx, host.DClient, KubeAPIContainerName, host.Address)
 }
+
+func RestartKubeAPIWithHealthcheck(ctx context.Context, hostList []*hosts.Host, df hosts.DialerFactory, certMap map[string]pki.CertificatePKI) error {
+	log.Infof(ctx, "[%s] Restarting %s on control plane nodes..", ControlRole, KubeAPIContainerName)
+	for _, runHost := range hostList {
+		logrus.Debugf("[%s] Restarting %s on node [%s]", ControlRole, KubeAPIContainerName, runHost.Address)
+		if err := RestartKubeAPI(ctx, runHost); err != nil {
+			return err
+		}
+		logrus.Debugf("[%s] Running healthcheck for %s on node [%s]", ControlRole, KubeAPIContainerName, runHost.Address)
+		if err := runHealthcheck(ctx, runHost, KubeAPIContainerName,
+			df, GetHealthCheckURL(true, KubeAPIPort),
+			certMap); err != nil {
+			return err
+		}
+	}
+	log.Infof(ctx, "[%s] Restarted %s on control plane nodes successfully", ControlRole, KubeAPIContainerName)
+	return nil
+}
diff --git a/templates/encryption_provider.go b/templates/encryption_provider.go
new file mode 100644
index 00000000..b607ffbe
--- /dev/null
+++ b/templates/encryption_provider.go
@@ -0,0 +1,34 @@
+package templates
+
+const (
+	DisabledEncryptionProviderFile = `apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+resources:
+- resources:
+  - secrets
+  providers:
+  - identity: {}
+  - aescbc:
+      keys:
+      - name: {{.Name}}
+        secret: {{.Secret}}`
+
+	MultiKeyEncryptionProviderFile = `apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+resources:
+- resources:
+  - secrets
+  providers:
+  - aescbc:
+      keys:
+{{- range $i, $v := .KeyList}}
+      - name: {{ $v.Name}}
+        secret: {{ $v.Secret -}}
+{{end}}
+  - identity: {}`
+
+	CustomEncryptionProviderFile = `apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+{{.CustomConfig}}
+`
+)
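
To make the rotation flow above concrete, here is a minimal standalone sketch (not part of the diff) of how a two-key aescbc EncryptionConfiguration is rendered, in the same spirit as providerFileFromKeyList and templates.MultiKeyEncryptionProviderFile. The template text, its indentation, and the key names/secrets below are illustrative assumptions rather than the exact strings used by RKE.

package main

import (
	"os"
	"text/template"
)

// encryptionKey mirrors the unexported struct defined in cluster/encryption.go above.
type encryptionKey struct {
	Name   string
	Secret string
}

// Simplified stand-in for templates.MultiKeyEncryptionProviderFile; indentation is illustrative.
const multiKeyProviderTemplate = `apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
{{- range .KeyList}}
      - name: {{.Name}}
        secret: {{.Secret}}
{{- end}}
  - identity: {}
`

func main() {
	// During RotateEncryptionKey the freshly generated key is listed first, so the
	// kube-apiserver encrypts new writes with it, while the old key stays available
	// for decrypting existing secrets until RewriteSecrets has re-saved them all.
	data := struct{ KeyList []*encryptionKey }{
		KeyList: []*encryptionKey{
			{Name: "key-2", Secret: "bmV3LWtleS1zZWNyZXQ="}, // hypothetical new (active) key
			{Name: "key-1", Secret: "b2xkLWtleS1zZWNyZXQ="}, // hypothetical old key
		},
	}
	tmpl := template.Must(template.New("enc").Parse(multiKeyProviderTemplate))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

Because kube-apiserver uses the first provider (and, within aescbc, the first key) for writes and tries the remaining entries only for reads, the key ordering in RotateEncryptionKey is what the "order is critical here!" comment refers to; the rendered file is then deployed to EncryptionProviderFilePath and referenced via the experimental-encryption-provider-config flag set in BuildKubeAPIProcess.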