2017-11-02 12:07:10 +02:00
package cluster
import (
2018-01-09 15:10:56 -07:00
"context"
2019-10-30 13:04:16 -07:00
"encoding/json"
2017-11-02 12:07:10 +02:00
"fmt"
"net"
2021-05-31 16:49:01 +02:00
"os"
2018-10-16 23:52:15 +02:00
"reflect"
2017-12-01 01:16:45 +02:00
"strings"
2018-10-16 23:52:15 +02:00
"time"
2017-11-02 12:07:10 +02:00
2018-11-01 01:11:57 +02:00
"github.com/docker/docker/api/types"
2019-10-30 13:04:16 -07:00
ghodssyaml "github.com/ghodss/yaml"
2019-11-11 18:25:31 -08:00
"github.com/rancher/norman/types/convert"
2019-11-19 16:42:59 -08:00
"github.com/rancher/norman/types/values"
2017-12-14 23:56:19 +02:00
"github.com/rancher/rke/authz"
2018-01-30 20:15:14 +02:00
"github.com/rancher/rke/docker"
2017-11-02 12:07:10 +02:00
"github.com/rancher/rke/hosts"
2018-02-01 23:28:31 +02:00
"github.com/rancher/rke/k8s"
2018-01-09 15:10:56 -07:00
"github.com/rancher/rke/log"
2019-08-19 10:53:15 -07:00
"github.com/rancher/rke/metadata"
2017-11-02 12:07:10 +02:00
"github.com/rancher/rke/pki"
2019-10-30 13:04:16 -07:00
"github.com/rancher/rke/pki/cert"
2017-11-02 12:07:10 +02:00
"github.com/rancher/rke/services"
2020-07-11 09:24:19 -07:00
v3 "github.com/rancher/rke/types"
2018-10-18 00:26:54 +02:00
"github.com/rancher/rke/util"
2017-11-13 23:28:38 +02:00
"github.com/sirupsen/logrus"
2018-02-01 23:43:09 +02:00
"golang.org/x/sync/errgroup"
2017-12-05 09:55:58 -07:00
"gopkg.in/yaml.v2"
2019-03-04 18:59:43 +02:00
v1 "k8s.io/api/core/v1"
2020-03-26 14:32:45 -07:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2022-11-01 22:05:35 -07:00
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
2019-10-30 13:04:16 -07:00
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
2022-10-27 15:58:10 -07:00
apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
2019-10-30 13:04:16 -07:00
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
2017-11-02 12:07:10 +02:00
"k8s.io/client-go/kubernetes"
2017-12-01 01:16:45 +02:00
"k8s.io/client-go/tools/clientcmd"
2019-08-19 10:53:15 -07:00
"k8s.io/client-go/transport"
2022-10-31 12:48:44 -07:00
eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit"
2017-11-02 12:07:10 +02:00
)
// Cluster is the top-level runtime state RKE operates on: the user-provided
// RKE config (embedded inline) plus everything resolved while provisioning —
// per-role host lists, PKI material, docker dialers and the local kube client.
type Cluster struct {
	AuthnStrategies   map[string]bool
	ConfigPath        string // path to the cluster.yml the config was read from
	ConfigDir         string
	CloudConfigFile   string
	ControlPlaneHosts []*hosts.Host
	Certificates      map[string]pki.CertificatePKI
	CertificateDir    string
	ClusterDomain     string
	ClusterCIDR       string
	ClusterDNSServer  string
	DinD              bool // docker-in-docker deployment mode
	DockerDialerFactory hosts.DialerFactory
	EtcdHosts           []*hosts.Host
	EtcdReadyHosts      []*hosts.Host
	ForceDeployCerts    bool
	// InactiveHosts are hosts excluded from upgrades; they reduce the
	// max-unavailable budget in the Upgrade* methods below.
	InactiveHosts          []*hosts.Host
	K8sWrapTransport       transport.WrapperFunc
	KubeClient             *kubernetes.Clientset
	KubernetesServiceIP    []net.IP
	LocalKubeConfigPath    string
	LocalConnDialerFactory hosts.DialerFactory
	PrivateRegistriesMap   map[string]v3.PrivateRegistry
	StateFilePath          string
	UpdateWorkersOnly      bool
	UseKubectlDeploy       bool
	v3.RancherKubernetesEngineConfig `yaml:",inline"`
	WorkerHosts      []*hosts.Host
	EncryptionConfig encryptionConfig
	// NewHosts is keyed by HostnameOverride; hosts present here are skipped by
	// the NotReady pre-checks during upgrades.
	NewHosts                      map[string]bool
	MaxUnavailableForWorkerNodes  int
	MaxUnavailableForControlNodes int
}

// encryptionConfig tracks secrets-encryption state for the current operation.
type encryptionConfig struct {
	RewriteSecrets         bool
	RotateKey              bool
	EncryptionProviderFile string
}
const (
	// Authentication strategy names accepted in the cluster config.
	AuthnX509Provider    = "x509"
	AuthnWebhookProvider = "webhook"
	// Names of the config maps RKE stores cluster state in.
	StateConfigMapName     = "cluster-state"
	FullStateConfigMapName = "full-cluster-state"
	UpdateStateTimeout     = 30
	GetStateTimeout        = 30
	RewriteWorkers         = 5
	SyncWorkers            = 10
	NoneAuthorizationMode  = "none"
	// Identity of the local node when running against localhost (e.g. DinD).
	LocalNodeAddress  = "127.0.0.1"
	LocalNodeHostname = "localhost"
	LocalNodeUser     = "root"
	CloudProvider     = "CloudProvider"
	ControlPlane      = "controlPlane"
	// Common label keys used to locate addon workloads.
	KubeAppLabel = "k8s-app"
	AppLabel     = "app"
	NameLabel    = "name"

	WorkerThreads = util.WorkerThreads

	SELinuxLabel = services.SELinuxLabel

	serviceAccountTokenFileParam = "service-account-key-file"

	SystemNamespace = "kube-system"

	// Addon names and the k8s resource kind each one deploys as; used by
	// parseAddonConfig to pick the right update-strategy type.
	daemonsetType   = "DaemonSet"
	deploymentType  = "Deployment"
	ingressAddon    = "ingress"
	monitoringAddon = "monitoring"
	dnsAddon        = "dns"
	networkAddon    = "network"
	nodelocalAddon  = "nodelocal"
)
2020-02-26 13:33:22 -08:00
// DeployControlPlane brings up the etcd plane and the controlplane. When
// reconcileCluster is false, both planes are started in one pass; when true,
// the controlplane goes through the rolling-upgrade path (UpgradeControlPlane).
// The returned string is the non-fatal "max unavailable" message produced by
// an upgrade (empty otherwise).
func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions, reconcileCluster bool) (string, error) {
	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
	if err != nil {
		return "", fmt.Errorf("failed to initialize new kubernetes client: %v", err)
	}
	// Deploy Etcd Plane
	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build etcd node plan map
	for _, etcdHost := range c.EtcdHosts {
		// service options are looked up per node OS type
		svcOptions, err := c.GetKubernetesServicesOptions(etcdHost.DockerInfo.OSType, svcOptionData)
		if err != nil {
			return "", err
		}
		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, svcOptions)
	}
	if len(c.Services.Etcd.ExternalURLs) > 0 {
		log.Infof(ctx, "[etcd] External etcd connection string has been specified, skipping etcd plane")
	} else {
		if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdNodePlanMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine, c.Services.Etcd, c.Certificates, c.Version); err != nil {
			return "", fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
		}
	}

	// Deploy Control plane
	cpNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build cp node plan map
	for _, cpHost := range c.ControlPlaneHosts {
		svcOptions, err := c.GetKubernetesServicesOptions(cpHost.DockerInfo.OSType, svcOptionData)
		if err != nil {
			return "", err
		}
		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, svcOptions)
	}
	if !reconcileCluster {
		// fresh deploy: start the whole controlplane in one pass
		if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
			c.LocalConnDialerFactory,
			c.PrivateRegistriesMap,
			cpNodePlanMap,
			c.UpdateWorkersOnly,
			c.SystemImages.Alpine,
			c.Certificates,
			c.Version); err != nil {
			return "", fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
		}
		return "", nil
	}
	return c.UpgradeControlPlane(ctx, kubeClient, cpNodePlanMap)
}
2020-03-01 12:54:45 -08:00
// UpgradeControlPlane upgrades controlplane components node by node. Existing
// hosts found in NotReady state are attempted first, without honoring
// max_unavailable_controlplane; the remaining nodes are then upgraded in a
// rolling fashion bounded by MaxUnavailableForControlNodes. Returns a
// non-fatal message about nodes that hit the max-unavailable limit.
func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernetes.Clientset, cpNodePlanMap map[string]v3.RKEConfigNodePlan) (string, error) {
	inactiveHosts := make(map[string]bool)
	var controlPlaneHosts, notReadyHosts []*hosts.Host
	var notReadyHostNames []string
	var err error

	for _, host := range c.InactiveHosts {
		// include only hosts with controlplane role
		if host.IsControl {
			inactiveHosts[host.HostnameOverride] = true
		}
	}
	// adjust the max-unavailable budget for hosts that are already inactive
	c.MaxUnavailableForControlNodes, err = services.ResetMaxUnavailable(c.MaxUnavailableForControlNodes, len(inactiveHosts), services.ControlRole)
	if err != nil {
		return "", err
	}
	for _, host := range c.ControlPlaneHosts {
		controlPlaneHosts = append(controlPlaneHosts, host)
		// skip the readiness probe for hosts marked as new in this run
		if c.NewHosts[host.HostnameOverride] {
			continue
		}
		// find existing nodes that are in NotReady state
		if err := services.CheckNodeReady(kubeClient, host, services.ControlRole); err != nil {
			logrus.Debugf("Found node %v in NotReady state", host.HostnameOverride)
			notReadyHosts = append(notReadyHosts, host)
			notReadyHostNames = append(notReadyHostNames, host.HostnameOverride)
		}
	}

	if len(notReadyHostNames) > 0 {
		// attempt upgrade on NotReady hosts without respecting max_unavailable_controlplane
		logrus.Infof("Attempting upgrade of controlplane components on following hosts in NotReady status: %v", strings.Join(notReadyHostNames, ","))
		err = services.RunControlPlane(ctx, notReadyHosts,
			c.LocalConnDialerFactory,
			c.PrivateRegistriesMap,
			cpNodePlanMap,
			c.UpdateWorkersOnly,
			c.SystemImages.Alpine,
			c.Certificates,
			c.Version)
		if err != nil {
			// best-effort: log and continue with the rolling upgrade below
			logrus.Errorf("Failed to upgrade controlplane components on NotReady hosts, error: %v", err)
		}
		// controlplane nodes also run worker components; refresh them too
		err = services.RunWorkerPlane(ctx, notReadyHosts,
			c.LocalConnDialerFactory,
			c.PrivateRegistriesMap,
			cpNodePlanMap,
			c.Certificates,
			c.UpdateWorkersOnly,
			c.SystemImages.Alpine,
			c.Version)
		if err != nil {
			logrus.Errorf("Failed to upgrade worker components on NotReady hosts, error: %v", err)
		}
		// Calling CheckNodeReady wil give some time for nodes to get in Ready state
		for _, host := range notReadyHosts {
			err = services.CheckNodeReady(kubeClient, host, services.ControlRole)
			if err != nil {
				logrus.Errorf("Host %v failed to report Ready status with error: %v", host.HostnameOverride, err)
			}
		}
	}
	// rolling upgrade respecting maxUnavailable
	errMsgMaxUnavailableNotFailed, err := services.UpgradeControlPlaneNodes(ctx, kubeClient, controlPlaneHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		cpNodePlanMap,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine,
		c.Certificates, c.UpgradeStrategy, c.NewHosts, inactiveHosts, c.MaxUnavailableForControlNodes, c.Version)
	if err != nil {
		return "", fmt.Errorf("[controlPlane] Failed to upgrade Control Plane: %v", err)
	}
	return errMsgMaxUnavailableNotFailed, nil
}
2020-02-04 11:27:52 -08:00
// DeployWorkerPlane brings up worker components on every node (etcd,
// controlplane and worker hosts all run worker components). When
// reconcileCluster is false, all hosts are started in one pass; otherwise the
// rolling-upgrade path (UpgradeWorkerPlane) is used. The returned string is
// the non-fatal "max unavailable" message produced by an upgrade.
func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions, reconcileCluster bool) (string, error) {
	var workerOnlyHosts, etcdAndWorkerHosts []*hosts.Host
	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
	if err != nil {
		return "", fmt.Errorf("failed to initialize new kubernetes client: %v", err)
	}
	// Deploy Worker plane
	workerNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
	// Build cp node plan map
	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	for _, host := range allHosts {
		svcOptions, err := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
		if err != nil {
			return "", err
		}
		workerNodePlanMap[host.Address] = BuildRKEConfigNodePlan(ctx, c, host, svcOptions)
		// controlplane hosts are not bucketed here: their worker components
		// are handled by the controlplane upgrade path
		if host.IsControl {
			continue
		}
		if !host.IsEtcd {
			// separating hosts with only worker role so they undergo upgrade in maxUnavailable batches
			workerOnlyHosts = append(workerOnlyHosts, host)
		} else {
			// separating nodes with etcd role, since at this point worker components in controlplane nodes are already upgraded by `UpgradeControlPlaneNodes`
			// and these nodes will undergo upgrade of worker components sequentially
			etcdAndWorkerHosts = append(etcdAndWorkerHosts, host)
		}
	}
	if !reconcileCluster {
		// fresh deploy: start worker components on all hosts in one pass
		if err := services.RunWorkerPlane(ctx, allHosts,
			c.LocalConnDialerFactory,
			c.PrivateRegistriesMap,
			workerNodePlanMap,
			c.Certificates,
			c.UpdateWorkersOnly,
			c.SystemImages.Alpine,
			c.Version); err != nil {
			return "", fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
		}
		return "", nil
	}

	return c.UpgradeWorkerPlane(ctx, kubeClient, workerNodePlanMap, etcdAndWorkerHosts, workerOnlyHosts)
}
2020-03-01 12:54:45 -08:00
// UpgradeWorkerPlane upgrades worker components on etcd+worker and worker-only
// hosts. Existing hosts found in NotReady state are attempted first, without
// honoring max_unavailable_worker; the rest are then upgraded bounded by
// MaxUnavailableForWorkerNodes. Returns a non-fatal message about nodes that
// hit the max-unavailable limit.
func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes.Clientset, workerNodePlanMap map[string]v3.RKEConfigNodePlan, etcdAndWorkerHosts, workerOnlyHosts []*hosts.Host) (string, error) {
	inactiveHosts := make(map[string]bool)
	var notReadyHosts []*hosts.Host
	var notReadyHostNames []string
	var err error

	for _, host := range c.InactiveHosts {
		// if host has controlplane role, it already has worker components upgraded
		if !host.IsControl {
			inactiveHosts[host.HostnameOverride] = true
		}
	}
	// adjust the max-unavailable budget for hosts that are already inactive
	c.MaxUnavailableForWorkerNodes, err = services.ResetMaxUnavailable(c.MaxUnavailableForWorkerNodes, len(inactiveHosts), services.WorkerRole)
	if err != nil {
		return "", err
	}
	for _, host := range append(etcdAndWorkerHosts, workerOnlyHosts...) {
		// skip the readiness probe for hosts marked as new in this run
		if c.NewHosts[host.HostnameOverride] {
			continue
		}
		// find existing nodes that are in NotReady state
		if err := services.CheckNodeReady(kubeClient, host, services.WorkerRole); err != nil {
			logrus.Debugf("Found node %v in NotReady state", host.HostnameOverride)
			notReadyHosts = append(notReadyHosts, host)
			notReadyHostNames = append(notReadyHostNames, host.HostnameOverride)
		}
	}
	if len(notReadyHostNames) > 0 {
		// attempt upgrade on NotReady hosts without respecting max_unavailable_worker
		logrus.Infof("Attempting upgrade of worker components on following hosts in NotReady status: %v", strings.Join(notReadyHostNames, ","))
		err = services.RunWorkerPlane(ctx, notReadyHosts,
			c.LocalConnDialerFactory,
			c.PrivateRegistriesMap,
			workerNodePlanMap,
			c.Certificates,
			c.UpdateWorkersOnly,
			c.SystemImages.Alpine,
			c.Version)
		if err != nil {
			// best-effort: log and continue with the rolling upgrade below
			logrus.Errorf("Failed to upgrade worker components on NotReady hosts, error: %v", err)
		}
		// Calling CheckNodeReady wil give some time for nodes to get in Ready state
		for _, host := range notReadyHosts {
			err = services.CheckNodeReady(kubeClient, host, services.WorkerRole)
			if err != nil {
				logrus.Errorf("Host %v failed to report Ready status with error: %v", host.HostnameOverride, err)
			}
		}
	}

	errMsgMaxUnavailableNotFailed, err := services.UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx, kubeClient, etcdAndWorkerHosts, workerOnlyHosts, inactiveHosts,
		c.LocalConnDialerFactory,
		c.PrivateRegistriesMap,
		workerNodePlanMap,
		c.Certificates,
		c.UpdateWorkersOnly,
		c.SystemImages.Alpine,
		c.UpgradeStrategy,
		c.NewHosts,
		c.MaxUnavailableForWorkerNodes,
		c.Version)
	if err != nil {
		return "", fmt.Errorf("[workerPlane] Failed to upgrade Worker Plane: %v", err)
	}
	return errMsgMaxUnavailableNotFailed, nil
}
2019-10-30 13:04:16 -07:00
func parseAuditLogConfig ( clusterFile string , rkeConfig * v3 . RancherKubernetesEngineConfig ) error {
if rkeConfig . Services . KubeAPI . AuditLog != nil &&
rkeConfig . Services . KubeAPI . AuditLog . Enabled &&
2019-11-15 04:58:59 -08:00
rkeConfig . Services . KubeAPI . AuditLog . Configuration != nil &&
2019-10-30 13:04:16 -07:00
rkeConfig . Services . KubeAPI . AuditLog . Configuration . Policy == nil {
return nil
}
logrus . Debugf ( "audit log policy found in cluster.yml" )
var r map [ string ] interface { }
2019-10-31 13:35:48 -07:00
err := ghodssyaml . Unmarshal ( [ ] byte ( clusterFile ) , & r )
2019-10-30 13:04:16 -07:00
if err != nil {
return fmt . Errorf ( "error unmarshalling: %v" , err )
}
2019-10-31 13:35:48 -07:00
if r [ "services" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
services := r [ "services" ] . ( map [ string ] interface { } )
2019-10-31 13:35:48 -07:00
if services [ "kube-api" ] == nil {
return nil
}
2022-10-31 12:48:44 -07:00
kubeAPI := services [ "kube-api" ] . ( map [ string ] interface { } )
if kubeAPI [ "audit_log" ] == nil {
2019-10-31 13:35:48 -07:00
return nil
}
2022-10-31 12:48:44 -07:00
auditlog := kubeAPI [ "audit_log" ] . ( map [ string ] interface { } )
2019-10-31 13:35:48 -07:00
if auditlog [ "configuration" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
alc := auditlog [ "configuration" ] . ( map [ string ] interface { } )
2019-10-31 13:35:48 -07:00
if alc [ "policy" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
policyBytes , err := json . Marshal ( alc [ "policy" ] )
if err != nil {
return fmt . Errorf ( "error marshalling audit policy: %v" , err )
}
scheme := runtime . NewScheme ( )
err = auditv1 . AddToScheme ( scheme )
if err != nil {
return fmt . Errorf ( "error adding to scheme: %v" , err )
}
codecs := serializer . NewCodecFactory ( scheme )
p := auditv1 . Policy { }
err = runtime . DecodeInto ( codecs . UniversalDecoder ( ) , policyBytes , & p )
if err != nil || p . Kind != "Policy" {
return fmt . Errorf ( "error decoding audit policy: %v" , err )
}
rkeConfig . Services . KubeAPI . AuditLog . Configuration . Policy = & p
return err
}
2022-10-31 12:48:44 -07:00
func parseEventRateLimit ( clusterFile string , rkeConfig * v3 . RancherKubernetesEngineConfig ) error {
if rkeConfig . Services . KubeAPI . EventRateLimit == nil || ! rkeConfig . Services . KubeAPI . EventRateLimit . Enabled {
return nil
}
logrus . Debugf ( "event rate limit is found in cluster.yml" )
2022-11-22 11:04:42 -07:00
var parsedClusterFile map [ string ] interface { }
err := ghodssyaml . Unmarshal ( [ ] byte ( clusterFile ) , & parsedClusterFile )
2022-10-31 12:48:44 -07:00
if err != nil {
return fmt . Errorf ( "error unmarshalling: %v" , err )
}
2022-11-22 11:04:42 -07:00
if parsedClusterFile [ "services" ] == nil {
2022-10-31 12:48:44 -07:00
return nil
}
2022-11-22 11:04:42 -07:00
cfg , found , err := unstructured . NestedMap ( parsedClusterFile , "services" , "kube-api" , "event_rate_limit" , "configuration" )
2022-10-31 12:48:44 -07:00
if err != nil {
return err
}
if ! found {
return nil
}
cfgBytes , err := json . Marshal ( cfg )
if err != nil {
return fmt . Errorf ( "error marshalling eventRateLimit: %v" , err )
}
output := eventratelimitapi . Configuration { }
err = json . Unmarshal ( cfgBytes , & output )
if err != nil {
return fmt . Errorf ( "error decoding eventRateLimit: %v" , err )
}
rkeConfig . Services . KubeAPI . EventRateLimit . Configuration = & output
return err
}
2019-10-30 13:04:16 -07:00
func parseAdmissionConfig ( clusterFile string , rkeConfig * v3 . RancherKubernetesEngineConfig ) error {
if rkeConfig . Services . KubeAPI . AdmissionConfiguration == nil {
return nil
}
logrus . Debugf ( "admission configuration found in cluster.yml" )
var r map [ string ] interface { }
2019-10-31 13:35:48 -07:00
err := ghodssyaml . Unmarshal ( [ ] byte ( clusterFile ) , & r )
2019-10-30 13:04:16 -07:00
if err != nil {
return fmt . Errorf ( "error unmarshalling: %v" , err )
}
2019-10-31 13:35:48 -07:00
if r [ "services" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
services := r [ "services" ] . ( map [ string ] interface { } )
2019-10-31 13:35:48 -07:00
if services [ "kube-api" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
kubeapi := services [ "kube-api" ] . ( map [ string ] interface { } )
2019-10-31 13:35:48 -07:00
if kubeapi [ "admission_configuration" ] == nil {
return nil
}
2019-10-30 13:04:16 -07:00
data , err := json . Marshal ( kubeapi [ "admission_configuration" ] )
if err != nil {
return fmt . Errorf ( "error marshalling admission configuration: %v" , err )
}
scheme := runtime . NewScheme ( )
2022-10-27 15:58:10 -07:00
err = apiserverv1 . AddToScheme ( scheme )
2019-10-30 13:04:16 -07:00
if err != nil {
return fmt . Errorf ( "error adding to scheme: %v" , err )
}
2022-10-27 15:58:10 -07:00
err = scheme . SetVersionPriority ( apiserverv1 . SchemeGroupVersion )
2019-10-30 13:04:16 -07:00
if err != nil {
return fmt . Errorf ( "error setting version priority: %v" , err )
}
codecs := serializer . NewCodecFactory ( scheme )
2022-10-27 15:58:10 -07:00
decoder := codecs . UniversalDecoder ( apiserverv1 . SchemeGroupVersion )
2019-10-30 13:04:16 -07:00
decodedObj , err := runtime . Decode ( decoder , data )
if err != nil {
return fmt . Errorf ( "error decoding data: %v" , err )
}
2022-10-27 15:58:10 -07:00
decodedConfig , ok := decodedObj . ( * apiserverv1 . AdmissionConfiguration )
2019-10-30 13:04:16 -07:00
if ! ok {
return fmt . Errorf ( "unexpected type: %T" , decodedObj )
}
rkeConfig . Services . KubeAPI . AdmissionConfiguration = decodedConfig
return nil
}
2019-11-19 16:42:59 -08:00
func parseAddonConfig ( clusterFile string , rkeConfig * v3 . RancherKubernetesEngineConfig ) error {
var r map [ string ] interface { }
err := ghodssyaml . Unmarshal ( [ ] byte ( clusterFile ) , & r )
if err != nil {
return fmt . Errorf ( "[parseAddonConfig] error unmarshalling RKE config: %v" , err )
}
addonsResourceType := map [ string ] string {
ingressAddon : daemonsetType ,
networkAddon : daemonsetType ,
monitoringAddon : deploymentType ,
dnsAddon : deploymentType ,
2020-03-03 12:05:59 +01:00
nodelocalAddon : daemonsetType ,
2019-11-19 16:42:59 -08:00
}
for addonName , addonType := range addonsResourceType {
2020-03-03 12:05:59 +01:00
var updateStrategyField interface { }
// nodelocal is a field under dns
if addonName == nodelocalAddon {
updateStrategyField = values . GetValueN ( r , "dns" , addonName , "update_strategy" )
} else {
updateStrategyField = values . GetValueN ( r , addonName , "update_strategy" )
}
2019-11-19 16:42:59 -08:00
if updateStrategyField == nil {
continue
}
switch addonType {
case daemonsetType :
updateStrategy , err := parseDaemonSetUpdateStrategy ( updateStrategyField )
if err != nil {
return err
}
switch addonName {
case ingressAddon :
rkeConfig . Ingress . UpdateStrategy = updateStrategy
case networkAddon :
rkeConfig . Network . UpdateStrategy = updateStrategy
2020-03-03 12:05:59 +01:00
case nodelocalAddon :
rkeConfig . DNS . Nodelocal . UpdateStrategy = updateStrategy
2019-11-19 16:42:59 -08:00
}
case deploymentType :
updateStrategy , err := parseDeploymentUpdateStrategy ( updateStrategyField )
if err != nil {
return err
}
switch addonName {
case dnsAddon :
rkeConfig . DNS . UpdateStrategy = updateStrategy
case monitoringAddon :
rkeConfig . Monitoring . UpdateStrategy = updateStrategy
}
}
}
return nil
}
2019-11-11 18:25:31 -08:00
func parseIngressConfig ( clusterFile string , rkeConfig * v3 . RancherKubernetesEngineConfig ) error {
if & rkeConfig . Ingress == nil {
return nil
}
var r map [ string ] interface { }
err := ghodssyaml . Unmarshal ( [ ] byte ( clusterFile ) , & r )
if err != nil {
return fmt . Errorf ( "[parseIngressConfig] error unmarshalling ingress config: %v" , err )
}
ingressMap := convert . ToMapInterface ( r [ "ingress" ] )
if err := parseIngressExtraEnv ( ingressMap , rkeConfig ) ; err != nil {
return err
}
if err := parseIngressExtraVolumes ( ingressMap , rkeConfig ) ; err != nil {
return err
}
2021-06-06 09:16:44 +02:00
return parseIngressExtraVolumeMounts ( ingressMap , rkeConfig )
2019-11-11 18:25:31 -08:00
}
2020-03-26 14:39:24 -07:00
func parseDaemonSetUpdateStrategy ( updateStrategyField interface { } ) ( * v3 . DaemonSetUpdateStrategy , error ) {
2019-11-19 16:42:59 -08:00
updateStrategyBytes , err := json . Marshal ( updateStrategyField )
if err != nil {
return nil , fmt . Errorf ( "[parseDaemonSetUpdateStrategy] error marshalling updateStrategy: %v" , err )
}
2020-03-26 14:39:24 -07:00
var updateStrategy * v3 . DaemonSetUpdateStrategy
2019-11-19 16:42:59 -08:00
err = json . Unmarshal ( updateStrategyBytes , & updateStrategy )
if err != nil {
return nil , fmt . Errorf ( "[parseIngressUpdateStrategy] error unmarshaling updateStrategy: %v" , err )
}
2020-03-26 14:39:24 -07:00
2019-11-19 16:42:59 -08:00
return updateStrategy , nil
}
2020-03-26 14:39:24 -07:00
func parseDeploymentUpdateStrategy ( updateStrategyField interface { } ) ( * v3 . DeploymentStrategy , error ) {
2019-11-19 16:42:59 -08:00
updateStrategyBytes , err := json . Marshal ( updateStrategyField )
if err != nil {
return nil , fmt . Errorf ( "[parseDeploymentUpdateStrategy] error marshalling updateStrategy: %v" , err )
}
2020-03-26 14:39:24 -07:00
var updateStrategy * v3 . DeploymentStrategy
2019-11-19 16:42:59 -08:00
err = json . Unmarshal ( updateStrategyBytes , & updateStrategy )
if err != nil {
return nil , fmt . Errorf ( "[parseDeploymentUpdateStrategy] error unmarshaling updateStrategy: %v" , err )
}
return updateStrategy , nil
}
2019-11-11 18:25:31 -08:00
// parseIngressExtraEnv copies the ingress controller's extra_envs entries from
// the raw cluster.yml ingress map onto rkeConfig, via a JSON round-trip.
// No-op when the key is absent.
func parseIngressExtraEnv(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	rawEnvs, present := ingressMap["extra_envs"]
	if !present {
		return nil
	}
	data, err := json.Marshal(rawEnvs)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraEnv] error marshalling ingress config extraEnvs: %v", err)
	}
	var extraEnvs []v3.ExtraEnv
	if err := json.Unmarshal(data, &extraEnvs); err != nil {
		return fmt.Errorf("[parseIngressExtraEnv] error unmarshaling ingress config extraEnvs: %v", err)
	}
	rkeConfig.Ingress.ExtraEnvs = extraEnvs
	return nil
}
// parseIngressExtraVolumes copies the ingress controller's extra_volumes
// entries from the raw cluster.yml ingress map onto rkeConfig, via a JSON
// round-trip. No-op when the key is absent.
func parseIngressExtraVolumes(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	rawVolumes, present := ingressMap["extra_volumes"]
	if !present {
		return nil
	}
	data, err := json.Marshal(rawVolumes)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumes] error marshalling ingress config extraVolumes: %v", err)
	}
	var extraVolumes []v3.ExtraVolume
	if err := json.Unmarshal(data, &extraVolumes); err != nil {
		return fmt.Errorf("[parseIngressExtraVolumes] error unmarshaling ingress config extraVolumes: %v", err)
	}
	rkeConfig.Ingress.ExtraVolumes = extraVolumes
	return nil
}
// parseIngressExtraVolumeMounts copies the ingress controller's
// extra_volume_mounts entries from the raw cluster.yml ingress map onto
// rkeConfig, via a JSON round-trip. No-op when the key is absent.
func parseIngressExtraVolumeMounts(ingressMap map[string]interface{}, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	rawMounts, present := ingressMap["extra_volume_mounts"]
	if !present {
		return nil
	}
	data, err := json.Marshal(rawMounts)
	if err != nil {
		return fmt.Errorf("[parseIngressExtraVolumeMounts] error marshalling ingress config extraVolumeMounts: %v", err)
	}
	var extraMounts []v3.ExtraVolumeMount
	if err := json.Unmarshal(data, &extraMounts); err != nil {
		return fmt.Errorf("[parseIngressExtraVolumeMounts] error unmarshaling ingress config extraVolumeMounts: %v", err)
	}
	rkeConfig.Ingress.ExtraVolumeMounts = extraMounts
	return nil
}
2020-02-04 11:27:52 -08:00
// parseNodeDrainInput re-parses upgrade_strategy.node_drain_input from the raw
// cluster.yml and applies defaults for fields the user did not set. The raw
// file must be consulted because after the typed unmarshal a missing field and
// an explicit zero/null are indistinguishable. No-op when no drain input was
// configured.
func parseNodeDrainInput(clusterFile string, rkeConfig *v3.RancherKubernetesEngineConfig) error {
	// setting some defaults here because for these fields there's no way of differentiating between user provided null value vs golang setting it to null during unmarshal
	if rkeConfig.UpgradeStrategy == nil || rkeConfig.UpgradeStrategy.DrainInput == nil {
		return nil
	}
	var config map[string]interface{}
	err := ghodssyaml.Unmarshal([]byte(clusterFile), &config)
	if err != nil {
		return fmt.Errorf("[parseNodeDrainInput] error unmarshalling: %v", err)
	}
	upgradeStrategy, err := convert.EncodeToMap(config["upgrade_strategy"])
	if err != nil {
		return err
	}
	nodeDrainInputMap, err := convert.EncodeToMap(upgradeStrategy["node_drain_input"])
	if err != nil {
		return err
	}
	nodeDrainInputBytes, err := ghodssyaml.Marshal(nodeDrainInputMap)
	if err != nil {
		return err
	}
	// this will only have fields that user set and none of the default empty values
	var nodeDrainInput v3.NodeDrainInput
	if err := ghodssyaml.Unmarshal(nodeDrainInputBytes, &nodeDrainInput); err != nil {
		return err
	}
	// update tracks whether any default was applied; only then is DrainInput replaced
	var update bool
	if _, ok := nodeDrainInputMap["ignore_daemonsets"]; !ok {
		// user hasn't provided any input, default to true
		nodeDrainInput.IgnoreDaemonSets = &DefaultNodeDrainIgnoreDaemonsets
		update = true
	}
	if _, ok := nodeDrainInputMap["timeout"]; !ok {
		// user hasn't provided any input, default to 120
		nodeDrainInput.Timeout = DefaultNodeDrainTimeout
		update = true
	}
	// YAML numbers decode as float64 in the generic map, hence the assertion type
	if providedGracePeriod, ok := nodeDrainInputMap["grace_period"].(float64); !ok {
		// user hasn't provided any input, default to -1
		nodeDrainInput.GracePeriod = DefaultNodeDrainGracePeriod
		update = true
	} else {
		// TODO: ghodssyaml.Marshal is losing the user provided value for GracePeriod, investigate why, till then assign the provided value explicitly
		nodeDrainInput.GracePeriod = int(providedGracePeriod)
	}
	if update {
		rkeConfig.UpgradeStrategy.DrainInput = &nodeDrainInput
	}
	return nil
}
2017-12-16 05:38:15 +02:00
// ParseConfig unmarshals a cluster YAML file into a RancherKubernetesEngineConfig.
// Several sub-configurations (custom secrets-encryption, admission, audit log,
// event rate limit, ingress, node drain input, addons) embed k8s types that do
// not round-trip cleanly through the generic yaml unmarshal, so each is
// re-parsed from the raw cluster file and assigned explicitly.
func ParseConfig(clusterFile string) (*v3.RancherKubernetesEngineConfig, error) {
	logrus.Tracef("Parsing cluster file [%v]", clusterFile)
	var rkeConfig v3.RancherKubernetesEngineConfig

	// the customConfig is mapped to a k8s type, which doesn't unmarshal well because it has a
	// nested struct and no yaml tags. Therefore, we have to re-parse it again and assign it correctly.
	// this only affects rke cli. Since rkeConfig is passed from rancher directly in the rancher use case.
	clusterFile, secretConfig, err := resolveCustomEncryptionConfig(clusterFile)
	if err != nil {
		return nil, err
	}
	if err := yaml.Unmarshal([]byte(clusterFile), &rkeConfig); err != nil {
		return nil, err
	}
	// only attach the custom secrets-encryption config when encryption is enabled
	if isEncryptionEnabled(&rkeConfig) && secretConfig != nil {
		rkeConfig.Services.KubeAPI.SecretsEncryptionConfig.CustomConfig = secretConfig
	}
	if err := parseAdmissionConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing admission config: %v", err)
	}
	if err := parseAuditLogConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing audit log config: %v", err)
	}
	if err := parseEventRateLimit(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing event rate limit config: %v", err)
	}
	if err := parseIngressConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing ingress config: %v", err)
	}
	if err := parseNodeDrainInput(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing upgrade strategy and node drain input: %v", err)
	}
	if err := parseAddonConfig(clusterFile, &rkeConfig); err != nil {
		return &rkeConfig, fmt.Errorf("error parsing addon config: %v", err)
	}
	return &rkeConfig, nil
}
2019-10-03 03:56:39 +02:00
// InitClusterObject builds a basic Cluster object from the parsed rkeConfig and
// the CLI flags: it resolves the config/kubeconfig/state-file/certificate
// paths, initializes version metadata on first use, loads or generates the
// secrets-encryption provider file, applies cluster defaults, derives network
// options, registers the cloud provider, classifies hosts into role groups,
// and validates the resulting configuration.
func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, flags ExternalFlags, encryptConfig string) (*Cluster, error) {
	// basic cluster object from rkeConfig
	var err error
	c := &Cluster{
		AuthnStrategies:               make(map[string]bool),
		RancherKubernetesEngineConfig: *rkeConfig,
		ConfigPath:                    flags.ClusterFilePath,
		ConfigDir:                     flags.ConfigDir,
		DinD:                          flags.DinD,
		CertificateDir:                flags.CertificateDir,
		StateFilePath:                 GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir),
		PrivateRegistriesMap:          make(map[string]v3.PrivateRegistry),
		EncryptionConfig: encryptionConfig{
			EncryptionProviderFile: encryptConfig,
		},
	}
	// lazily initialize the k8s-version -> system-images metadata on first use
	if metadata.K8sVersionToRKESystemImages == nil {
		if err := metadata.InitMetadata(ctx); err != nil {
			return nil, err
		}
	}
	if len(c.ConfigPath) == 0 {
		c.ConfigPath = pki.ClusterConfig
	}
	// set kube_config, state file, and certificate dir
	c.LocalKubeConfigPath = pki.GetLocalKubeConfig(c.ConfigPath, c.ConfigDir)
	c.StateFilePath = GetStateFilePath(c.ConfigPath, c.ConfigDir)
	if len(c.CertificateDir) == 0 {
		c.CertificateDir = GetCertificateDirPath(c.ConfigPath, c.ConfigDir)
	}
	// We don't manage custom configuration, if it's there we just use it.
	if isEncryptionCustomConfig(rkeConfig) {
		if c.EncryptionConfig.EncryptionProviderFile, err = c.readEncryptionCustomConfig(); err != nil {
			return nil, err
		}
	} else if isEncryptionEnabled(rkeConfig) && c.EncryptionConfig.EncryptionProviderFile == "" {
		if c.EncryptionConfig.EncryptionProviderFile, err = c.getEncryptionProviderFile(); err != nil {
			return nil, err
		}
	}
	// Setting cluster Defaults
	err = c.setClusterDefaults(ctx, flags)
	if err != nil {
		return nil, err
	}
	// extract cluster network configuration
	if err = c.setNetworkOptions(); err != nil {
		return nil, fmt.Errorf("Failed to set network options: %v", err)
	}
	// Register cloud provider
	if err := c.setCloudProvider(); err != nil {
		return nil, fmt.Errorf("Failed to register cloud provider: %v", err)
	}
	// set hosts groups
	if err := c.InvertIndexHosts(); err != nil {
		return nil, fmt.Errorf("Failed to classify hosts from config file: %v", err)
	}
	// validate cluster configuration
	if err := c.ValidateCluster(ctx); err != nil {
		return nil, fmt.Errorf("Failed to validate cluster: %v", err)
	}
	return c, nil
}
2017-11-21 21:25:08 +02:00
2018-11-02 07:53:29 +02:00
func ( c * Cluster ) setNetworkOptions ( ) error {
var err error
2018-02-06 21:25:54 +02:00
c . KubernetesServiceIP , err = pki . GetKubernetesServiceIP ( c . Services . KubeAPI . ServiceClusterIPRange )
2017-11-02 12:07:10 +02:00
if err != nil {
2018-11-02 07:53:29 +02:00
return fmt . Errorf ( "Failed to get Kubernetes Service IP: %v" , err )
2017-11-02 12:07:10 +02:00
}
c . ClusterDomain = c . Services . Kubelet . ClusterDomain
2017-11-06 22:50:41 +02:00
c . ClusterCIDR = c . Services . KubeController . ClusterCIDR
2017-11-08 02:32:55 +02:00
c . ClusterDNSServer = c . Services . Kubelet . ClusterDNSServer
2018-11-02 07:53:29 +02:00
return nil
}
2018-01-30 20:15:14 +02:00
2018-11-08 01:54:08 +02:00
func ( c * Cluster ) SetupDialers ( ctx context . Context , dailersOptions hosts . DialersOptions ) error {
c . DockerDialerFactory = dailersOptions . DockerDialerFactory
c . LocalConnDialerFactory = dailersOptions . LocalConnDialerFactory
c . K8sWrapTransport = dailersOptions . K8sWrapTransport
2018-05-09 00:30:50 +02:00
// Create k8s wrap transport for bastion host
if len ( c . BastionHost . Address ) > 0 {
2018-06-25 21:01:02 +02:00
var err error
c . K8sWrapTransport , err = hosts . BastionHostWrapTransport ( c . BastionHost )
if err != nil {
2018-11-02 07:53:29 +02:00
return err
2018-06-25 21:01:02 +02:00
}
2021-05-31 16:49:01 +02:00
if c . BastionHost . IgnoreProxyEnvVars {
logrus . Debug ( "Unset http proxy environment variables" )
for _ , v := range util . ProxyEnvVars {
os . Unsetenv ( v )
}
}
2018-05-09 00:30:50 +02:00
}
2018-11-02 07:53:29 +02:00
return nil
}
2018-11-07 02:24:49 +02:00
// RebuildKubeconfig regenerates the local admin kubeconfig for the cluster.
func RebuildKubeconfig(ctx context.Context, kubeCluster *Cluster) error {
	return rebuildLocalAdminConfig(ctx, kubeCluster)
}
2018-01-09 15:10:56 -07:00
// rebuildLocalAdminConfig regenerates the local admin kubeconfig, trying each
// control-plane host in turn until one serves a reachable Kubernetes API. When
// no admin cert is present in the certificate bundle, the on-disk kubeconfig
// is reused with only the address swapped; otherwise a fresh x509 kubeconfig
// is assembled from the CA and admin cert/key. The last deployed config is
// stored back into the certificate bundle.
func rebuildLocalAdminConfig(ctx context.Context, kubeCluster *Cluster) error {
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	var activeControlPlaneHostFound bool
	log.Infof(ctx, "[reconcile] Rebuilding and updating local kube config")
	var workingConfig, newConfig string
	currentKubeConfig := kubeCluster.Certificates[pki.KubeAdminCertName]
	caCrt := kubeCluster.Certificates[pki.CACertName].Certificate
	for _, cpHost := range kubeCluster.ControlPlaneHosts {
		if (currentKubeConfig == pki.CertificatePKI{}) {
			// no admin cert in the bundle: reuse the local kubeconfig on disk,
			// swapping in this control-plane host's address
			log.Debugf(ctx, "[reconcile] Rebuilding and updating local kube config, creating new address")
			kubeCluster.Certificates = make(map[string]pki.CertificatePKI)
			newConfig = getLocalAdminConfigWithNewAddress(kubeCluster.LocalKubeConfigPath, cpHost.Address, kubeCluster.ClusterName)
		} else {
			// build a fresh x509 kubeconfig from the CA and admin cert/key
			log.Debugf(ctx, "[reconcile] Rebuilding and updating local kube config, creating new kubeconfig")
			kubeURL := fmt.Sprintf("https://%s:6443", cpHost.Address)
			caData := string(cert.EncodeCertPEM(caCrt))
			crtData := string(cert.EncodeCertPEM(currentKubeConfig.Certificate))
			keyData := string(cert.EncodePrivateKeyPEM(currentKubeConfig.Key))
			newConfig = pki.GetKubeConfigX509WithData(kubeURL, kubeCluster.ClusterName, pki.KubeAdminCertName, caData, crtData, keyData)
		}
		if err := pki.DeployAdminConfig(ctx, newConfig, kubeCluster.LocalKubeConfigPath); err != nil {
			return fmt.Errorf("Failed to redeploy local admin config with new host: %v", err)
		}
		workingConfig = newConfig
		// stop at the first host whose API endpoint answers a version request
		if _, err := GetK8sVersion(kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err == nil {
			log.Infof(ctx, "[reconcile] host [%s] is a control plane node with reachable Kubernetes API endpoint in the cluster", cpHost.Address)
			activeControlPlaneHostFound = true
			break
		}
		log.Warnf(ctx, "[reconcile] host [%s] is a control plane node without reachable Kubernetes API endpoint in the cluster", cpHost.Address)
	}
	if !activeControlPlaneHostFound {
		log.Warnf(ctx, "[reconcile] no control plane node with reachable Kubernetes API endpoint in the cluster found")
	}
	currentKubeConfig.Config = workingConfig
	kubeCluster.Certificates[pki.KubeAdminCertName] = currentKubeConfig
	return nil
}
2017-12-01 01:16:45 +02:00
func getLocalConfigAddress ( localConfigPath string ) ( string , error ) {
config , err := clientcmd . BuildConfigFromFlags ( "" , localConfigPath )
2017-11-17 02:45:51 +02:00
if err != nil {
2017-12-01 01:16:45 +02:00
return "" , err
2017-11-17 02:45:51 +02:00
}
2017-12-01 01:16:45 +02:00
splittedAdress := strings . Split ( config . Host , ":" )
address := splittedAdress [ 1 ]
return address [ 2 : ] , nil
2017-11-17 02:45:51 +02:00
}
2017-12-06 04:22:50 +02:00
2018-03-13 17:18:07 -07:00
func getLocalAdminConfigWithNewAddress ( localConfigPath , cpAddress string , clusterName string ) string {
2017-12-06 04:22:50 +02:00
config , _ := clientcmd . BuildConfigFromFlags ( "" , localConfigPath )
2019-04-02 20:21:45 +02:00
if config == nil || config . BearerToken != "" {
2018-02-15 05:25:36 +02:00
return ""
}
2017-12-06 04:22:50 +02:00
config . Host = fmt . Sprintf ( "https://%s:6443" , cpAddress )
return pki . GetKubeConfigX509WithData (
"https://" + cpAddress + ":6443" ,
2018-03-13 17:18:07 -07:00
clusterName ,
2018-01-17 01:10:14 +02:00
pki . KubeAdminCertName ,
2017-12-06 04:22:50 +02:00
string ( config . CAData ) ,
string ( config . CertData ) ,
string ( config . KeyData ) )
}
2017-12-14 23:56:19 +02:00
2018-11-08 01:54:08 +02:00
// ApplyAuthzResources deploys the authorization-related Kubernetes resources:
// the job-deployer service account always, and for RBAC clusters additionally
// the system-node ClusterRoleBinding, the kube-api proxy ClusterRole, and
// (when PodSecurityPolicy is enabled) the default PSP with its role bindings.
func ApplyAuthzResources(ctx context.Context, rkeConfig v3.RancherKubernetesEngineConfig, flags ExternalFlags, dailersOptions hosts.DialersOptions) error {
	// dialer factories are not needed here since we are not uses docker only k8s jobs
	kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "")
	if err != nil {
		return err
	}
	if err := kubeCluster.SetupDialers(ctx, dailersOptions); err != nil {
		return err
	}
	// nothing to apply without a control plane
	if len(kubeCluster.ControlPlaneHosts) == 0 {
		return nil
	}
	// Print proxy environment variables as we are directly contacting the cluster
	util.PrintProxyEnvVars()
	if err := authz.ApplyJobDeployerServiceAccount(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
		return fmt.Errorf("Failed to apply the ServiceAccount needed for job execution: %v", err)
	}
	if kubeCluster.Authorization.Mode == NoneAuthorizationMode {
		return nil
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode {
		if err := authz.ApplySystemNodeClusterRoleBinding(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply the ClusterRoleBinding needed for node authorization: %v", err)
		}
		if err := authz.ApplyKubeAPIClusterRole(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply the ClusterRole and Binding needed for node kubeapi proxy: %v", err)
		}
	}
	if kubeCluster.Authorization.Mode == services.RBACAuthorizationMode && kubeCluster.Services.KubeAPI.PodSecurityPolicy {
		if err := authz.ApplyDefaultPodSecurityPolicy(ctx, kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy: %v", err)
		}
		if err := authz.ApplyDefaultPodSecurityPolicyRole(ctx, kubeCluster.LocalKubeConfigPath, SystemNamespace, kubeCluster.K8sWrapTransport); err != nil {
			return fmt.Errorf("Failed to apply default PodSecurityPolicy ClusterRole and ClusterRoleBinding: %v", err)
		}
	}
	return nil
}
2018-01-16 20:29:09 +02:00
2019-06-17 13:52:15 -07:00
func ( c * Cluster ) deployAddons ( ctx context . Context , data map [ string ] interface { } ) error {
if err := c . deployK8sAddOns ( ctx , data ) ; err != nil {
2018-02-01 23:28:31 +02:00
return err
}
2018-05-07 23:51:09 +02:00
if err := c . deployUserAddOns ( ctx ) ; err != nil {
if err , ok := err . ( * addonError ) ; ok && err . isCritical {
return err
}
log . Warnf ( ctx , "Failed to deploy addon execute job [%s]: %v" , UserAddonsIncludeResourceName , err )
}
return nil
2018-02-01 23:28:31 +02:00
}
2018-09-27 01:26:20 +02:00
// SyncLabelsAndTaints reconciles node labels and taints across the cluster.
// When every control-plane node of the current cluster is scheduled for
// deletion, those nodes are cleaned first to work around
// https://github.com/rancher/rancher/issues/15810. Labels and taints are then
// synced concurrently by SyncWorkers workers draining a shared host queue.
func (c *Cluster) SyncLabelsAndTaints(ctx context.Context, currentCluster *Cluster) error {
	// Handle issue when deleting all controlplane nodes https://github.com/rancher/rancher/issues/15810
	if currentCluster != nil {
		cpToDelete := hosts.GetToDeleteHosts(currentCluster.ControlPlaneHosts, c.ControlPlaneHosts, c.InactiveHosts, false)
		if len(cpToDelete) == len(currentCluster.ControlPlaneHosts) {
			log.Infof(ctx, "[sync] Cleaning left control plane nodes from reconciliation")
			for _, toDeleteHost := range cpToDelete {
				if err := cleanControlNode(ctx, c, currentCluster, toDeleteHost); err != nil {
					return err
				}
			}
		}
	}
	// sync node taints. Add or remove taints from hosts
	syncTaints(ctx, currentCluster, c)
	if len(c.ControlPlaneHosts) > 0 {
		log.Infof(ctx, "[sync] Syncing nodes Labels and Taints")
		k8sClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
		if err != nil {
			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
		}
		hostList := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
		var errgrp errgroup.Group
		// fill a buffered channel with every host, then close it so the
		// worker goroutines exit once the queue drains
		hostQueue := make(chan *hosts.Host, len(hostList))
		for _, host := range hostList {
			hostQueue <- host
		}
		close(hostQueue)
		for i := 0; i < SyncWorkers; i++ {
			w := i // capture the loop variable for the closure
			errgrp.Go(func() error {
				var errs []error
				for host := range hostQueue {
					logrus.Debugf("worker [%d] starting sync for node [%s]", w, host.HostnameOverride)
					if err := setNodeAnnotationsLabelsTaints(k8sClient, host); err != nil {
						errs = append(errs, err)
					}
				}
				// collect all per-host failures into a single error
				if len(errs) > 0 {
					return fmt.Errorf("%v", errs)
				}
				return nil
			})
		}
		if err := errgrp.Wait(); err != nil {
			return err
		}
		log.Infof(ctx, "[sync] Successfully synced nodes Labels and Taints")
	}
	return nil
}
2018-02-01 23:43:09 +02:00
2018-10-16 23:52:15 +02:00
// setNodeAnnotationsLabelsTaints fetches the k8s Node for the host and applies
// address annotations, label changes, and taint changes, retrying up to six
// times around transient lookup or update failures (node not yet registered,
// update conflicts). Returns nil when the node is already in sync or once an
// update succeeds; otherwise the last error seen.
func setNodeAnnotationsLabelsTaints(k8sClient *kubernetes.Clientset, host *hosts.Host) error {
	node := &v1.Node{}
	var err error
	for retries := 0; retries <= 5; retries++ {
		node, err = k8s.GetNode(k8sClient, host.HostnameOverride)
		if err != nil {
			logrus.Debugf("[hosts] Can't find node by name [%s], error: %v", host.HostnameOverride, err)
			time.Sleep(2 * time.Second)
			continue
		}
		// keep a pre-mutation copy so a no-op sync can skip the API call
		oldNode := node.DeepCopy()
		k8s.SetNodeAddressesAnnotations(node, host.InternalAddress, host.Address)
		k8s.SyncNodeLabels(node, host.ToAddLabels, host.ToDelLabels)
		k8s.SyncNodeTaints(node, host.ToAddTaints, host.ToDelTaints)
		if reflect.DeepEqual(oldNode, node) {
			logrus.Debugf("skipping syncing labels for node [%s]", node.Name)
			return nil
		}
		_, err = k8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
		if err != nil {
			logrus.Debugf("Error syncing labels for node [%s]: %v", node.Name, err)
			time.Sleep(5 * time.Second)
			continue
		}
		return nil
	}
	return err
}
2018-02-01 23:43:09 +02:00
// PrePullK8sImages pulls (or reuses a local copy of) the kubernetes system
// image on every unique cluster host, using WorkerThreads workers draining a
// shared host queue. Per-host errors are collected and returned together.
func (c *Cluster) PrePullK8sImages(ctx context.Context) error {
	log.Infof(ctx, "Pre-pulling kubernetes images")
	var errgrp errgroup.Group
	hostList := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	hostsQueue := util.GetObjectQueue(hostList)
	for w := 0; w < WorkerThreads; w++ {
		errgrp.Go(func() error {
			var errList []error
			for host := range hostsQueue {
				runHost := host.(*hosts.Host)
				err := docker.UseLocalOrPull(ctx, runHost.DClient, runHost.Address, c.SystemImages.Kubernetes, "pre-deploy", c.PrivateRegistriesMap)
				if err != nil {
					errList = append(errList, err)
				}
			}
			return util.ErrList(errList)
		})
	}
	if err := errgrp.Wait(); err != nil {
		return err
	}
	log.Infof(ctx, "Kubernetes images pulled successfully")
	return nil
}
2018-02-08 05:13:06 +02:00
2018-03-29 22:58:46 +02:00
// ConfigureCluster deploys the network plugin and the cluster addons onto an
// initialized cluster. Only k8s jobs are used here, so docker dialer factories
// are not required. Non-critical network-plugin failures are logged as
// warnings; critical addon errors abort the run.
func ConfigureCluster(
	ctx context.Context,
	rkeConfig v3.RancherKubernetesEngineConfig,
	crtBundle map[string]pki.CertificatePKI,
	flags ExternalFlags,
	dailersOptions hosts.DialersOptions,
	data map[string]interface{},
	useKubectl bool) error {
	// dialer factories are not needed here since we are not uses docker only k8s jobs
	kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags, "")
	if err != nil {
		return err
	}
	if err := kubeCluster.SetupDialers(ctx, dailersOptions); err != nil {
		return err
	}
	kubeCluster.UseKubectlDeploy = useKubectl
	if len(kubeCluster.ControlPlaneHosts) > 0 {
		kubeCluster.Certificates = crtBundle
		if err := kubeCluster.deployNetworkPlugin(ctx, data); err != nil {
			// only addon errors flagged critical abort the deployment
			if err, ok := err.(*addonError); ok && err.isCritical {
				return err
			}
			log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", NetworkPluginResourceName, err)
		}
		if err := kubeCluster.deployAddons(ctx, data); err != nil {
			return err
		}
	}
	return nil
}
2018-08-20 06:37:04 +02:00
// RestartClusterPods deletes the pods created by RKE for network, ingress,
// DNS, monitoring, and agent components so their controllers recreate them.
// Deletion runs concurrently with services.WorkerThreads workers, each
// draining a queue of label selectors; per-label errors are collected and
// returned together.
func RestartClusterPods(ctx context.Context, kubeCluster *Cluster) error {
	log.Infof(ctx, "Restarting network, ingress, and metrics pods")
	// this will remove the pods created by RKE and let the controller creates them again
	kubeClient, err := k8s.NewClient(kubeCluster.LocalKubeConfigPath, kubeCluster.K8sWrapTransport)
	if err != nil {
		return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
	}
	labelsList := []string{
		fmt.Sprintf("%s=%s", KubeAppLabel, FlannelNetworkPlugin),
		fmt.Sprintf("%s=%s", KubeAppLabel, CanalNetworkPlugin),
		fmt.Sprintf("%s=%s", NameLabel, WeaveNetworkAppName),
		fmt.Sprintf("%s=%s", AppLabel, NginxIngressAddonAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, DefaultMonitoringProvider),
		fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAddonAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAutoscalerAppName),
		fmt.Sprintf("%s=%s", KubeAppLabel, CoreDNSAutoscalerAppName),
		fmt.Sprintf("%s=%s", AppLabel, KubeAPIAuthAppName),
		fmt.Sprintf("%s=%s", AppLabel, CattleClusterAgentAppName),
	}
	for _, calicoLabel := range CalicoNetworkLabels {
		labelsList = append(labelsList, fmt.Sprintf("%s=%s", KubeAppLabel, calicoLabel))
	}
	var errgrp errgroup.Group
	labelQueue := util.GetObjectQueue(labelsList)
	for w := 0; w < services.WorkerThreads; w++ {
		errgrp.Go(func() error {
			var errList []error
			for label := range labelQueue {
				runLabel := label.(string)
				// list pods to be deleted
				pods, err := k8s.ListPodsByLabel(kubeClient, runLabel)
				if err != nil {
					errList = append(errList, err)
				}
				// delete pods
				err = k8s.DeletePods(kubeClient, pods)
				if err != nil {
					errList = append(errList, err)
				}
			}
			return util.ErrList(errList)
		})
	}
	return errgrp.Wait()
}
2018-11-01 01:11:57 +02:00
2019-03-09 04:09:16 +02:00
// IsLegacyKubeAPI reports whether any control-plane host runs a kube-apiserver
// container whose arguments point the service-account token file at the
// kube-apiserver key, which identifies a legacy cluster during cert rotation.
func IsLegacyKubeAPI(ctx context.Context, kubeCluster *Cluster) (bool, error) {
	log.Infof(ctx, "[controlplane] Check if rotating a legacy cluster")
	for _, host := range kubeCluster.ControlPlaneHosts {
		inspect, err := docker.InspectContainer(ctx, host.DClient, host.Address, services.KubeAPIContainerName)
		if err != nil {
			return false, err
		}
		for _, arg := range inspect.Args {
			isTokenFileArg := strings.Contains(arg, serviceAccountTokenFileParam)
			if isTokenFileArg && strings.Contains(arg, pki.GetKeyPath(pki.KubeAPICertName)) {
				return true, nil
			}
		}
	}
	return false, nil
}
2020-07-21 13:35:36 -07:00
// GetHostInfoMap returns docker info for every unique cluster host, keyed by
// host address.
func (c *Cluster) GetHostInfoMap() map[string]types.Info {
	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
	infoByAddress := make(map[string]types.Info, len(allHosts))
	for _, h := range allHosts {
		infoByAddress[h.Address] = h.DockerInfo
	}
	return infoByAddress
}
func ( c * Cluster ) getPrefixPath ( os string ) string {
switch {
case os == "windows" && c . WindowsPrefixPath != "" :
2020-08-19 15:31:10 -07:00
return util . CleanWindowsPath ( c . WindowsPrefixPath )
2020-07-21 13:35:36 -07:00
default :
return c . PrefixPath
}
}
// getSidecarEntryPoint returns the sidecar container entrypoint for the given
// node OS.
func (c *Cluster) getSidecarEntryPoint(os string) []string {
	if os == "windows" {
		return []string{"pwsh", "-NoLogo", "-NonInteractive", "-File", "c:/usr/bin/sidecar.ps1"}
	}
	return []string{"/bin/bash"}
}
// getNginxEntryPoint returns the nginx-proxy container entrypoint for the
// given node OS.
func (c *Cluster) getNginxEntryPoint(os string) []string {
	if os == "windows" {
		return []string{"pwsh", "-NoLogo", "-NonInteractive", "-File", "c:/usr/bin/nginx-proxy.ps1"}
	}
	return []string{"nginx-proxy"}
}
// getRKEToolsEntryPoint returns the OS-specific rke-tools entrypoint with the
// given command appended as its final argument.
func (c *Cluster) getRKEToolsEntryPoint(os, cmd string) []string {
	if os == "windows" {
		return append(c.getRKEToolsWindowsEntryPoint(), cmd)
	}
	return append(c.getRKEToolsLinuxEntryPoint(), cmd)
}
// getRKEToolsWindowsEntryPoint returns the container entrypoint used by the
// rke-tools image on Windows hosts.
func (c *Cluster) getRKEToolsWindowsEntryPoint() []string {
	return []string{"pwsh", "-NoLogo", "-NonInteractive", "-File", "c:/usr/bin/entrypoint.ps1"}
}
// getRKEToolsLinuxEntryPoint picks the rke-tools entrypoint based on the
// semver tag of the KubernetesServicesSidecar image: images tagged older than
// the default entrypoint version get the legacy entrypoint. Unparsable tags
// fall back to the default entrypoint.
func (c *Cluster) getRKEToolsLinuxEntryPoint() []string {
	imageParts := strings.Split(c.SystemImages.KubernetesServicesSidecar, ":")
	tag := imageParts[len(imageParts)-1]
	imageVersion, err := util.StrToSemVer(tag)
	if err != nil {
		return []string{DefaultToolsEntrypoint}
	}
	defaultVersion, err := util.StrToSemVer(DefaultToolsEntrypointVersion)
	if err != nil {
		return []string{DefaultToolsEntrypoint}
	}
	if imageVersion.LessThan(*defaultVersion) {
		return []string{LegacyToolsEntrypoint}
	}
	return []string{DefaultToolsEntrypoint}
}
func ( c * Cluster ) getWindowsEnv ( host * hosts . Host ) [ ] string {
return [ ] string {
fmt . Sprintf ( "%s=%s" , ClusterCIDREnv , c . ClusterCIDR ) ,
fmt . Sprintf ( "%s=%s" , ClusterDomainEnv , c . ClusterDomain ) ,
fmt . Sprintf ( "%s=%s" , ClusterDNSServerEnv , c . ClusterDNSServer ) ,
fmt . Sprintf ( "%s=%s" , ClusterServiceCIDREnv , c . Services . KubeController . ServiceClusterIPRange ) ,
fmt . Sprintf ( "%s=%s" , NodeAddressEnv , host . Address ) ,
fmt . Sprintf ( "%s=%s" , NodeInternalAddressEnv , host . InternalAddress ) ,
fmt . Sprintf ( "%s=%s" , CloudProviderNameEnv , c . CloudProvider . Name ) ,
2020-08-05 15:39:25 -07:00
fmt . Sprintf ( "%s=%s" , NodePrefixPath , host . PrefixPath ) ,
2020-07-21 13:35:36 -07:00
}
}