mirror of https://github.com/rancher/rke.git

Add Azure cloud provider
This commit is contained in:
parent 28f216e0e6
commit 3694d16212

44 README.md
@@ -295,6 +295,50 @@ services:
Note that RKE only supports connecting to a TLS-enabled etcd setup; multiple endpoints can be listed in the `external_urls` field. RKE will not accept external URLs and nodes with the `etcd` role at the same time: specify either the `etcd` role on servers or an external etcd, but not both.

## Cloud Providers

Starting with v0.1.3, RKE supports cloud providers.

### AWS Cloud Provider

To enable the AWS cloud provider, set the following in the cluster configuration file:

```
cloud_provider:
  name: aws
```

The AWS cloud provider requires the EC2 instances to be provisioned with an appropriate IAM role.

### Azure Cloud Provider

The Azure cloud provider can be enabled by passing `azure` as the cloud provider name and adding the required set of options to the configuration file:

```
cloud_provider:
  name: azure
  cloud_config:
    aadClientId: xxxxxxxxxxxx
    aadClientSecret: xxxxxxxxxxx
    location: westus
    resourceGroup: rke-rg
    subnetName: rke-subnet
    subscriptionId: xxxxxxxxxxx
    vnetName: rke-vnet
    tenantId: xxxxxxxxxx
    securityGroupName: rke-nsg
```

You also have to make sure that the Azure node name matches the Kubernetes node name; you can do that by setting `hostname_override` in the config file:

```
nodes:
  - address: x.x.x.x
    hostname_override: azure-rke1
    user: ubuntu
    role:
      - controlplane
      - etcd
      - worker
```

## Operating Systems Notes

### Atomic OS

97 cluster/cloud-provider.go Normal file

@@ -0,0 +1,97 @@
package cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/docker/docker/api/types/container"
	"github.com/rancher/rke/docker"
	"github.com/rancher/rke/hosts"
	"github.com/rancher/rke/log"
	"github.com/rancher/types/apis/management.cattle.io/v3"
	"github.com/sirupsen/logrus"
)

const (
	CloudConfigDeployer    = "cloud-config-deployer"
	CloudConfigServiceName = "cloud"
	CloudConfigPath        = "/etc/kubernetes/cloud-config.json"
	CloudConfigEnv         = "RKE_CLOUD_CONFIG"
)

// deployCloudProviderConfig renders the cloud provider options to JSON and
// copies the result to every unique host in the cluster.
func deployCloudProviderConfig(ctx context.Context, uniqueHosts []*hosts.Host, cloudProvider v3.CloudProvider, alpineImage string, prsMap map[string]v3.PrivateRegistry) error {
	cloudConfig, err := getCloudConfigFile(ctx, cloudProvider)
	if err != nil {
		return err
	}
	for _, host := range uniqueHosts {
		log.Infof(ctx, "[%s] Deploying cloud config file to node [%s]", CloudConfigServiceName, host.Address)
		if err := doDeployConfigFile(ctx, host, cloudConfig, alpineImage, prsMap); err != nil {
			return fmt.Errorf("Failed to deploy cloud config file on node [%s]: %v", host.Address, err)
		}
	}
	return nil
}

// getCloudConfigFile converts the string-valued cloud_config map into JSON,
// coercing values that parse as booleans, integers, or floats into native
// JSON types so the cloud provider receives correctly typed options.
func getCloudConfigFile(ctx context.Context, cloudProvider v3.CloudProvider) (string, error) {
	if len(cloudProvider.CloudConfig) == 0 {
		return "", nil
	}
	tmpMap := make(map[string]interface{})
	for key, value := range cloudProvider.CloudConfig {
		tmpBool, err := strconv.ParseBool(value)
		if err == nil {
			tmpMap[key] = tmpBool
			continue
		}
		tmpInt, err := strconv.ParseInt(value, 10, 64)
		if err == nil {
			tmpMap[key] = tmpInt
			continue
		}
		tmpFloat, err := strconv.ParseFloat(value, 64)
		if err == nil {
			tmpMap[key] = tmpFloat
			continue
		}
		tmpMap[key] = value
	}
	jsonString, err := json.MarshalIndent(tmpMap, "", "\n")
	if err != nil {
		return "", err
	}
	return string(jsonString), nil
}

// doDeployConfigFile writes the rendered cloud config to CloudConfigPath on
// the host by running a short-lived alpine container that receives the file
// contents through an environment variable.
func doDeployConfigFile(ctx context.Context, host *hosts.Host, cloudConfig, alpineImage string, prsMap map[string]v3.PrivateRegistry) error {
	// remove existing container. Only way it's still here is if previous deployment failed
	if err := docker.DoRemoveContainer(ctx, host.DClient, CloudConfigDeployer, host.Address); err != nil {
		return err
	}
	containerEnv := []string{CloudConfigEnv + "=" + cloudConfig}
	imageCfg := &container.Config{
		Image: alpineImage,
		Cmd: []string{
			"sh",
			"-c",
			// only write the file if it does not already exist on the host
			fmt.Sprintf("if [ ! -f %s ]; then echo -e \"$%s\" > %s;fi", CloudConfigPath, CloudConfigEnv, CloudConfigPath),
		},
		Env: containerEnv,
	}
	hostCfg := &container.HostConfig{
		Binds: []string{
			"/etc/kubernetes:/etc/kubernetes",
		},
		Privileged: true,
	}
	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, CloudConfigDeployer, host.Address, CloudConfigServiceName, prsMap); err != nil {
		return err
	}
	if err := docker.DoRemoveContainer(ctx, host.DClient, CloudConfigDeployer, host.Address); err != nil {
		return err
	}
	logrus.Debugf("[%s] Successfully started cloud config deployer container on node [%s]", CloudConfigServiceName, host.Address)
	return nil
}
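
To make the coercion in `getCloudConfigFile` concrete, here is a minimal standalone sketch (an illustrative example, not part of this commit; the option keys are made up) showing how string values from `cloud_config` end up as typed JSON:

```
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// coerce mirrors the ParseBool -> ParseInt -> ParseFloat cascade used by
// getCloudConfigFile, falling back to the raw string when nothing parses.
func coerce(value string) interface{} {
	if b, err := strconv.ParseBool(value); err == nil {
		return b
	}
	if i, err := strconv.ParseInt(value, 10, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(value, 64); err == nil {
		return f
	}
	return value
}

func main() {
	// Illustrative cloud_config entries; all values arrive as strings.
	cloudConfig := map[string]string{
		"useInstanceMetadata": "true",   // emitted as JSON true
		"backoffRetries":      "6",      // emitted as JSON 6
		"location":            "westus", // stays a JSON string
	}
	tmpMap := make(map[string]interface{}, len(cloudConfig))
	for key, value := range cloudConfig {
		tmpMap[key] = coerce(value)
	}
	jsonString, _ := json.MarshalIndent(tmpMap, "", "  ")
	fmt.Println(string(jsonString))
}
```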

@@ -120,6 +120,12 @@ func (c *Cluster) SetUpHosts(ctx context.Context) error {
			return err
		}
		log.Infof(ctx, "[certificates] Successfully deployed kubernetes certificates to Cluster nodes")
		if c.CloudProvider.Name != "" {
			if err := deployCloudProviderConfig(ctx, hosts, c.CloudProvider, c.SystemImages.Alpine, c.PrivateRegistriesMap); err != nil {
				return err
			}
			log.Infof(ctx, "[%s] Successfully deployed kubernetes cloud config to Cluster nodes", CloudConfigServiceName)
		}
	}
	return nil
}

@@ -97,7 +97,9 @@ func (c *Cluster) BuildKubeAPIProcess() v3.Process {
		"kubelet-client-key":       pki.GetKeyPath(pki.KubeAPICertName),
		"service-account-key-file": pki.GetKeyPath(pki.KubeAPICertName),
	}

	if len(c.CloudProvider.Name) > 0 {
		CommandArgs["cloud-config"] = CloudConfigPath
	}
	args := []string{
		"--etcd-cafile=" + etcdCAClientCert,
		"--etcd-certfile=" + etcdClientCert,

@@ -174,7 +176,9 @@ func (c *Cluster) BuildKubeControllerProcess() v3.Process {
		"service-account-private-key-file": pki.GetKeyPath(pki.KubeAPICertName),
		"root-ca-file":                     pki.GetCertPath(pki.CACertName),
	}

	if len(c.CloudProvider.Name) > 0 {
		CommandArgs["cloud-config"] = CloudConfigPath
	}
	args := []string{}
	if c.Authorization.Mode == services.RBACAuthorizationMode {
		args = append(args, "--use-service-account-credentials=true")

@@ -249,6 +253,9 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host) v3.Process {
	if host.Address != host.InternalAddress {
		CommandArgs["node-ip"] = host.InternalAddress
	}
	if len(c.CloudProvider.Name) > 0 {
		CommandArgs["cloud-config"] = CloudConfigPath
	}
	VolumesFrom := []string{
		services.SidekickContainerName,
	}
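
The three hunks above only add a `cloud-config` entry to each service's `CommandArgs` map; the surrounding process builders later flatten that map into command-line flags. As a hedged sketch (an illustrative helper, not code from this commit), that flattening typically looks like this:

```
package main

import (
	"fmt"
	"sort"
)

// renderArgs flattens a CommandArgs-style map into --key=value flags,
// sorted here only to make the example output deterministic.
func renderArgs(commandArgs map[string]string) []string {
	args := make([]string, 0, len(commandArgs))
	for arg, value := range commandArgs {
		args = append(args, fmt.Sprintf("--%s=%s", arg, value))
	}
	sort.Strings(args)
	return args
}

func main() {
	commandArgs := map[string]string{
		"cloud-provider": "azure",
		"cloud-config":   "/etc/kubernetes/cloud-config.json",
	}
	fmt.Println(renderArgs(commandArgs))
	// [--cloud-config=/etc/kubernetes/cloud-config.json --cloud-provider=azure]
}
```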

@@ -37,7 +37,7 @@ type Host struct {

const (
	ToCleanEtcdDir = "/var/lib/etcd"
-	ToCleanSSLDir  = "/etc/kubernetes/ssl"
+	ToCleanSSLDir  = "/etc/kubernetes"
	ToCleanCNIConf = "/etc/cni"
	ToCleanCNIBin  = "/opt/cni"
	ToCleanCNILib  = "/var/lib/cni"