Mirror of https://github.com/rancher/rke.git
Add HA

Add relative path for local kube config
Add default cluster yaml config name
@@ -22,6 +22,7 @@ func SetUpAuthentication(kubeCluster, currentCluster *Cluster) error {
 		kubeCluster.ControlPlaneHosts,
 		kubeCluster.WorkerHosts,
 		kubeCluster.ClusterDomain,
+		kubeCluster.LocalKubeConfigPath,
 		kubeCluster.KubernetesServiceIP)
 	if err != nil {
 		return fmt.Errorf("Failed to generate Kubernetes certificates: %v", err)
@@ -3,6 +3,7 @@ package cluster
 import (
 	"fmt"
 	"net"
+	"path/filepath"
 
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
@@ -15,6 +16,8 @@ import (
 
 type Cluster struct {
 	v1.RancherKubernetesEngineConfig `yaml:",inline"`
+	ConfigPath                       string `yaml:"config_path"`
+	LocalKubeConfigPath              string
 	EtcdHosts                        []hosts.Host
 	WorkerHosts                      []hosts.Host
 	ControlPlaneHosts                []hosts.Host
@@ -28,6 +31,7 @@ type Cluster struct {
 
 const (
 	X509AuthenticationProvider = "x509"
+	DefaultClusterConfig       = "cluster.yml"
 	StateConfigMapName         = "cluster-state"
 	UpdateStateTimeout         = 30
 	GetStateTimeout            = 30
@@ -69,6 +73,10 @@ func ParseConfig(clusterFile string) (*Cluster, error) {
 	c.ClusterDomain = c.Services.Kubelet.ClusterDomain
 	c.ClusterCIDR = c.Services.KubeController.ClusterCIDR
 	c.ClusterDNSServer = c.Services.Kubelet.ClusterDNSServer
+	if len(c.ConfigPath) == 0 {
+		c.ConfigPath = DefaultClusterConfig
+	}
+	c.LocalKubeConfigPath = GetLocalKubeConfig(c.ConfigPath)
 	return c, nil
 }
 
@@ -100,3 +108,10 @@ func parseClusterFile(clusterFile string) (*Cluster, error) {
 	}
 	return &kubeCluster, nil
 }
+
+func GetLocalKubeConfig(configPath string) string {
+	baseDir := filepath.Dir(configPath)
+	fileName := filepath.Base(configPath)
+	baseDir += "/"
+	return fmt.Sprintf("%s%s%s", baseDir, pki.KubeAdminConfigPrefix, fileName)
+}
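The new GetLocalKubeConfig helper derives the local kubeconfig name from the cluster file's own path. A minimal standalone sketch of the expected behavior — the /opt/rke path is a hypothetical example, and ".kube_config_" mirrors the KubeAdminConfigPrefix constant introduced in pki further down:

	package main

	import (
		"fmt"
		"path/filepath"
	)

	// localKubeConfig mirrors GetLocalKubeConfig above.
	func localKubeConfig(configPath string) string {
		return filepath.Dir(configPath) + "/" + ".kube_config_" + filepath.Base(configPath)
	}

	func main() {
		fmt.Println(localKubeConfig("cluster.yml"))          // ./.kube_config_cluster.yml
		fmt.Println(localKubeConfig("/opt/rke/cluster.yml")) // /opt/rke/.kube_config_cluster.yml
	}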
@@ -67,7 +67,7 @@ func (c *Cluster) SetUpHosts() error {
 	if err != nil {
 		return err
 	}
-	err = pki.DeployAdminConfig(c.Certificates[pki.KubeAdminCommonName].Config)
+	err = pki.DeployAdminConfig(c.Certificates[pki.KubeAdminCommonName].Config, c.LocalKubeConfigPath)
 	if err != nil {
 		return err
 	}
@@ -6,7 +6,6 @@ import (
 
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/k8s"
-	"github.com/rancher/rke/pki"
 	"github.com/sirupsen/logrus"
 	yaml "gopkg.in/yaml.v2"
 	"k8s.io/api/core/v1"
@@ -16,7 +15,7 @@ import (
 func (c *Cluster) SaveClusterState(clusterFile string) error {
 	// Reinitialize kubernetes Client
 	var err error
-	c.KubeClient, err = k8s.NewClient(pki.KubeAdminConfigPath)
+	c.KubeClient, err = k8s.NewClient(c.LocalKubeConfigPath)
 	if err != nil {
 		return fmt.Errorf("Failed to re-initialize Kubernetes Client: %v", err)
 	}
@@ -24,7 +23,7 @@ func (c *Cluster) SaveClusterState(clusterFile string) error {
 	if err != nil {
 		return fmt.Errorf("[certificates] Failed to Save Kubernetes certificates: %v", err)
 	}
-	err = saveStateToKubernetes(c.KubeClient, pki.KubeAdminConfigPath, []byte(clusterFile))
+	err = saveStateToKubernetes(c.KubeClient, c.LocalKubeConfigPath, []byte(clusterFile))
 	if err != nil {
 		return fmt.Errorf("[state] Failed to save configuration state: %v", err)
 	}
@@ -34,12 +33,12 @@ func (c *Cluster) SaveClusterState(clusterFile string) error {
 func (c *Cluster) GetClusterState() (*Cluster, error) {
 	var err error
 	var currentCluster *Cluster
-	c.KubeClient, err = k8s.NewClient(pki.KubeAdminConfigPath)
+	c.KubeClient, err = k8s.NewClient(c.LocalKubeConfigPath)
 	if err != nil {
 		logrus.Warnf("Failed to initiate new Kubernetes Client: %v", err)
 	} else {
 		// Handle previous kubernetes state and certificate generation
-		currentCluster = getStateFromKubernetes(c.KubeClient, pki.KubeAdminConfigPath)
+		currentCluster = getStateFromKubernetes(c.KubeClient, c.LocalKubeConfigPath)
 		if currentCluster != nil {
 			currentCluster.Certificates, err = getClusterCerts(c.KubeClient)
 			if err != nil {
@@ -113,9 +112,9 @@ func getStateFromKubernetes(kubeClient *kubernetes.Clientset, kubeConfigPath str
 	}
 }
 
-func GetK8sVersion() (string, error) {
+func GetK8sVersion(localConfigPath string) (string, error) {
 	logrus.Debugf("[version] Using admin.config to connect to Kubernetes cluster..")
-	k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
+	k8sClient, err := k8s.NewClient(localConfigPath)
 	if err != nil {
 		return "", fmt.Errorf("Failed to create Kubernetes Client: %v", err)
 	}
@@ -4,7 +4,6 @@ import (
 	"fmt"
 
 	"github.com/rancher/rke/k8s"
-	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
 	"github.com/sirupsen/logrus"
 )
@@ -12,7 +11,7 @@ import (
 func (c *Cluster) ClusterUpgrade() error {
 	// make sure all nodes are Ready
 	logrus.Debugf("[upgrade] Checking node status")
-	if err := checkK8sNodesState(); err != nil {
+	if err := checkK8sNodesState(c.LocalKubeConfigPath); err != nil {
 		return err
 	}
 	// upgrade Control Plane
@@ -24,15 +23,15 @@ func (c *Cluster) ClusterUpgrade() error {
 
 	// upgrade Worker Plane
 	logrus.Infof("[upgrade] Upgrading Worker Plane Services")
-	if err := services.UpgradeWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services); err != nil {
+	if err := services.UpgradeWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services, c.LocalKubeConfigPath); err != nil {
 		return err
 	}
 	logrus.Infof("[upgrade] Worker Plane Services upgraded successfully")
 	return nil
 }
 
-func checkK8sNodesState() error {
-	k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
+func checkK8sNodesState(localConfigPath string) error {
+	k8sClient, err := k8s.NewClient(localConfigPath)
 	if err != nil {
 		return err
 	}
@@ -18,7 +18,7 @@ func ClusterCommand() cli.Command {
 		cli.StringFlag{
 			Name:   "cluster-file",
 			Usage:  "Specify an alternate cluster YAML file",
-			Value:  "cluster.yml",
+			Value:  cluster.DefaultClusterConfig,
 			EnvVar: "CLUSTER_FILE",
 		},
 	}
@@ -46,7 +46,7 @@ func ClusterCommand() cli.Command {
 			Name:   "version",
 			Usage:  "Show Cluster Kubernetes version",
 			Action: getClusterVersion,
-			Flags:  []cli.Flag{},
+			Flags:  clusterUpFlags,
 		},
 		cli.Command{
 			Name:  "upgrade",
@@ -136,13 +136,21 @@ func resolveClusterFile(ctx *cli.Context) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("failed to read file: %v", err)
 	}
-	clusterFile = string(buf)
+	clusterFileBuff := string(buf)
 
-	return clusterFile, nil
+	/*
+		This is a hacky way to add the config path to the cluster object without
+		messing with the ClusterUp function and to avoid conflicts with calls from
+		kontainer-engine: the config path (cluster.yml by default) is appended to the
+		config buffer as a field, to be parsed later and set as the ConfigPath field
+		on the cluster object.
+	*/
+	clusterFileBuff = fmt.Sprintf("%s\nconfig_path: %s\n", clusterFileBuff, clusterFile)
+	return clusterFileBuff, nil
 }
 
 func getClusterVersion(ctx *cli.Context) error {
-	serverVersion, err := cluster.GetK8sVersion()
+	localKubeConfig := cluster.GetLocalKubeConfig(ctx.String("cluster-file"))
+	serverVersion, err := cluster.GetK8sVersion(localKubeConfig)
 	if err != nil {
 		return err
 	}
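In effect, resolveClusterFile now returns the YAML content with one extra line appended, which ParseConfig later picks up through the config_path field on the Cluster struct. Assuming the default file name, the tail of the returned buffer would look like:

	# ...original cluster.yml content...
	config_path: cluster.yml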
package/nginx-proxy/Dockerfile (new file)
@@ -0,0 +1,12 @@
+FROM nginx:1.13.6-alpine
+
+RUN apk add --update curl ca-certificates \
+    && curl -L -o /usr/bin/confd https://github.com/kelseyhightower/confd/releases/download/v0.12.0-alpha3/confd-0.12.0-alpha3-linux-amd64 \
+    && chmod +x /usr/bin/confd \
+    && mkdir -p /etc/confd
+
+ADD templates /etc/confd/templates/
+ADD conf.d /etc/confd/conf.d/
+ADD entrypoint.sh /
+
+ENTRYPOINT ["/entrypoint.sh"]
package/nginx-proxy/conf.d/nginx.toml (new file)
@@ -0,0 +1,6 @@
+[template]
+src = "nginx.tmpl"
+dest = "/etc/nginx/nginx.conf"
+keys = [
+  "CP_HOSTS",
+]
package/nginx-proxy/entrypoint.sh (new file, executable)
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Run confd
+confd -onetime -backend env
+
+# Start nginx
+nginx -g 'daemon off;'
package/nginx-proxy/templates/nginx.tmpl (new file)
@@ -0,0 +1,26 @@
+error_log stderr notice;
+
+worker_processes auto;
+events {
+	multi_accept on;
+	use epoll;
+	worker_connections 1024;
+}
+
+stream {
+	upstream kube_apiserver {
+		least_conn;
+		{{ $servers := split (getenv "CP_HOSTS") "," }}{{range $servers}}
+		server {{.}}:6443;
+		{{end}}
+	}
+
+	server {
+		listen        127.0.0.1:6443;
+		proxy_pass    kube_apiserver;
+		proxy_timeout 10m;
+		proxy_connect_timeout 1s;
+
+	}
+
+}
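At container start, confd splits the CP_HOSTS environment variable on commas and emits one upstream server per controlplane address. For a hypothetical CP_HOSTS=10.0.0.1,10.0.0.2, the rendered /etc/nginx/nginx.conf stream block would be roughly:

	stream {
		upstream kube_apiserver {
			least_conn;
			server 10.0.0.1:6443;
			server 10.0.0.2:6443;
		}
		server {
			listen        127.0.0.1:6443;
			proxy_pass    kube_apiserver;
			proxy_timeout 10m;
			proxy_connect_timeout 1s;
		}
	}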
@@ -56,6 +56,6 @@ const (
 
 	KubeAdminCommonName       = "kube-admin"
 	KubeAdminOrganizationName = "system:masters"
-	KubeAdminConfigPath       = "admin.config"
+	KubeAdminConfigPrefix     = ".kube_config_"
 	KubeAdminConfigENVName    = "KUBECFG_ADMIN"
 )
@@ -102,9 +102,9 @@ func doRunDeployer(host *hosts.Host, containerEnv []string) error {
 	}
 }
 
-func DeployAdminConfig(kubeConfig string) error {
+func DeployAdminConfig(kubeConfig, localConfigPath string) error {
 	logrus.Debugf("Deploying admin Kubeconfig locally: %s", kubeConfig)
-	err := ioutil.WriteFile(KubeAdminConfigPath, []byte(kubeConfig), 0644)
+	err := ioutil.WriteFile(localConfigPath, []byte(kubeConfig), 0640)
 	if err != nil {
 		return fmt.Errorf("Failed to create local admin kubeconfig file: %v", err)
 	}
pki/pki.go
@@ -27,16 +27,16 @@ type CertificatePKI struct {
 }
 
 // StartCertificatesGeneration ...
-func StartCertificatesGeneration(cpHosts []hosts.Host, workerHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
+func StartCertificatesGeneration(cpHosts []hosts.Host, workerHosts []hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
 	logrus.Infof("[certificates] Generating kubernetes certificates")
-	certs, err := generateCerts(cpHosts, clusterDomain, KubernetesServiceIP)
+	certs, err := generateCerts(cpHosts, clusterDomain, localConfigPath, KubernetesServiceIP)
 	if err != nil {
 		return nil, err
 	}
 	return certs, nil
 }
 
-func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
+func generateCerts(cpHosts []hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
 	certs := make(map[string]CertificatePKI)
 	// generate CA certificate and key
 	logrus.Infof("[certificates] Generating CA kubernetes certificates")
@@ -83,7 +83,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesService
 	certs[KubeControllerName] = CertificatePKI{
 		Certificate: kubeControllerCrt,
 		Key:         kubeControllerKey,
-		Config:      getKubeConfigX509("https://"+cpHosts[0].AdvertiseAddress+":6443", KubeControllerName, CACertPath, KubeControllerCertPath, KubeControllerKeyPath),
+		Config:      getKubeConfigX509("https://127.0.0.1:6443", KubeControllerName, CACertPath, KubeControllerCertPath, KubeControllerKeyPath),
 		Name:        KubeControllerName,
 		CommonName:  KubeControllerCommonName,
 		EnvName:     KubeControllerCertENVName,
@@ -104,7 +104,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesService
 	certs[KubeSchedulerName] = CertificatePKI{
 		Certificate: kubeSchedulerCrt,
 		Key:         kubeSchedulerKey,
-		Config:      getKubeConfigX509("https://"+cpHosts[0].AdvertiseAddress+":6443", KubeSchedulerName, CACertPath, KubeSchedulerCertPath, KubeSchedulerKeyPath),
+		Config:      getKubeConfigX509("https://127.0.0.1:6443", KubeSchedulerName, CACertPath, KubeSchedulerCertPath, KubeSchedulerKeyPath),
 		Name:        KubeSchedulerName,
 		CommonName:  KubeSchedulerCommonName,
 		EnvName:     KubeSchedulerCertENVName,
@@ -125,7 +125,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesService
 	certs[KubeProxyName] = CertificatePKI{
 		Certificate: kubeProxyCrt,
 		Key:         kubeProxyKey,
-		Config:      getKubeConfigX509("https://"+cpHosts[0].AdvertiseAddress+":6443", KubeProxyName, CACertPath, KubeProxyCertPath, KubeProxyKeyPath),
+		Config:      getKubeConfigX509("https://127.0.0.1:6443", KubeProxyName, CACertPath, KubeProxyCertPath, KubeProxyKeyPath),
 		Name:        KubeProxyName,
 		CommonName:  KubeProxyCommonName,
 		EnvName:     KubeProxyCertENVName,
@@ -146,7 +146,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesService
 	certs[KubeNodeName] = CertificatePKI{
 		Certificate: nodeCrt,
 		Key:         nodeKey,
-		Config:      getKubeConfigX509("https://"+cpHosts[0].AdvertiseAddress+":6443", KubeNodeName, CACertPath, KubeNodeCertPath, KubeNodeKeyPath),
+		Config:      getKubeConfigX509("https://127.0.0.1:6443", KubeNodeName, CACertPath, KubeNodeCertPath, KubeNodeKeyPath),
 		Name:        KubeNodeName,
 		CommonName:  KubeNodeCommonName,
 		OUName:      KubeNodeOrganizationName,
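Pointing these component kubeconfigs at https://127.0.0.1:6443 rather than cpHosts[0] is the core of the HA change: on controlplane hosts the apiserver serves on that port locally, and on worker hosts the nginx-proxy container added in this commit listens on 127.0.0.1:6443 and balances across all controlplane hosts, so no component is pinned to the first controlplane node. A sketch of the cluster stanza such a kubeconfig would contain (the CA path is illustrative):

	clusters:
	- cluster:
	    certificate-authority: /etc/kubernetes/ssl/kube-ca.pem
	    server: "https://127.0.0.1:6443"
	  name: local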
@@ -175,7 +175,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesService
 		CommonName:    KubeAdminCommonName,
 		OUName:        KubeAdminOrganizationName,
 		ConfigEnvName: KubeAdminConfigENVName,
-		ConfigPath:    KubeAdminConfigPath,
+		ConfigPath:    localConfigPath,
 	}
 	return certs, nil
 }
@@ -27,7 +27,7 @@ func TestPKI(t *testing.T) {
 			DClient: nil,
 		},
 	}
-	certificateMap, err := StartCertificatesGeneration(cpHosts, cpHosts, FakeClusterDomain, net.ParseIP(FakeKubernetesServiceIP))
+	certificateMap, err := StartCertificatesGeneration(cpHosts, cpHosts, FakeClusterDomain, "", net.ParseIP(FakeKubernetesServiceIP))
 	if err != nil {
 		t.Fatalf("Failed To generate certificates: %v", err)
 	}
@@ -13,8 +13,9 @@ import (
 
 func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
 	logrus.Infof("[%s] Building up Etcd Plane..", ETCDRole)
+	initCluster := getEtcdInitialCluster(etcdHosts)
 	for _, host := range etcdHosts {
-		imageCfg, hostCfg := buildEtcdConfig(host, etcdService)
+		imageCfg, hostCfg := buildEtcdConfig(host, etcdService, initCluster)
 		err := docker.DoRunContainer(host.DClient, imageCfg, hostCfg, EtcdContainerName, host.AdvertisedHostname, ETCDRole)
 		if err != nil {
 			return err
@@ -24,7 +25,7 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
 	return nil
 }
 
-func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService) (*container.Config, *container.HostConfig) {
+func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster string) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: etcdService.Image,
 		Cmd: []string{"/usr/local/bin/etcd",
@@ -35,7 +36,8 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService) (*container.Co
 			"--initial-advertise-peer-urls=http://" + host.AdvertiseAddress + ":2380",
 			"--listen-peer-urls=http://0.0.0.0:2380",
 			"--initial-cluster-token=etcd-cluster-1",
-			"--initial-cluster=etcd-" + host.AdvertisedHostname + "=http://" + host.AdvertiseAddress + ":2380"},
+			"--initial-cluster=" + initCluster,
+			"--initial-cluster-state=new"},
 	}
 	hostCfg := &container.HostConfig{
 		RestartPolicy: container.RestartPolicy{Name: "always"},
@@ -74,3 +76,14 @@ func getEtcdConnString(hosts []hosts.Host) string {
 	}
 	return connString
 }
+
+func getEtcdInitialCluster(hosts []hosts.Host) string {
+	initialCluster := ""
+	for i, host := range hosts {
+		initialCluster += fmt.Sprintf("etcd-%s=http://%s:2380", host.AdvertisedHostname, host.AdvertiseAddress)
+		if i < (len(hosts) - 1) {
+			initialCluster += ","
+		}
+	}
+	return initialCluster
+}
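For example, with two hypothetical etcd hosts etcd1/10.0.0.10 and etcd2/10.0.0.11, getEtcdInitialCluster produces the value handed to --initial-cluster:

	etcd-etcd1=http://10.0.0.10:2380,etcd-etcd2=http://10.0.0.11:2380

so every member now starts with the full peer list instead of only itself.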
@@ -48,8 +48,9 @@ func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdC
 		Image: kubeAPIService.Image,
 		Cmd: []string{"/hyperkube",
 			"apiserver",
-			"--insecure-bind-address=0.0.0.0",
+			"--insecure-bind-address=127.0.0.1",
+			"--insecure-port=8080",
 			"--secure-port=6443",
 			"--cloud-provider=",
 			"--allow_privileged=true",
 			"--service-cluster-ip-range=" + kubeAPIService.ServiceClusterIPRange,
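Narrowing --insecure-bind-address to 127.0.0.1 (with the insecure port pinned to 8080) keeps unauthenticated API access local to the controlplane host; for instance, a health probe run on that host itself would still work:

	# hypothetical check from the controlplane host
	curl http://127.0.0.1:8080/healthz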
@@ -49,6 +49,7 @@ func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (
 			"controller-manager",
 			"--address=0.0.0.0",
 			"--cloud-provider=",
+			"--leader-elect=true",
 			"--kubeconfig=" + pki.KubeControllerConfigPath,
 			"--enable-hostpath-provisioner=false",
 			"--node-monitor-grace-period=40s",
@@ -65,6 +66,7 @@ func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (
 		Binds: []string{
 			"/etc/kubernetes:/etc/kubernetes",
 		},
+		NetworkMode:   "host",
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
 	for arg, value := range kubeControllerService.ExtraArgs {
services/proxy.go (new file)
@@ -0,0 +1,44 @@
+package services
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/docker"
+	"github.com/rancher/rke/hosts"
+)
+
+const (
+	NginxProxyImage   = "husseingalal/nginx-nodeporxy:dev"
+	NginxProxyEnvName = "CP_HOSTS"
+)
+
+func runNginxProxy(host hosts.Host, cpHosts []hosts.Host) error {
+	nginxProxyEnv := buildProxyEnv(cpHosts)
+	imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.AdvertisedHostname, WorkerRole)
+}
+
+func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
+	imageCfg := &container.Config{
+		Image: NginxProxyImage,
+		Env:   []string{fmt.Sprintf("%s=%s", NginxProxyEnvName, nginxProxyEnv)},
+	}
+	hostCfg := &container.HostConfig{
+		NetworkMode:   "host",
+		RestartPolicy: container.RestartPolicy{Name: "always"},
+	}
+
+	return imageCfg, hostCfg
+}
+
+func buildProxyEnv(cpHosts []hosts.Host) string {
+	proxyEnv := ""
+	for i, cpHost := range cpHosts {
+		proxyEnv += fmt.Sprintf("%s", cpHost.AdvertiseAddress)
+		if i < (len(cpHosts) - 1) {
+			proxyEnv += ","
+		}
+	}
+	return proxyEnv
+}
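buildProxyEnv emits the comma-separated controlplane address list that the image's confd template (shown earlier) consumes via CP_HOSTS. Running the same container by hand would look roughly like this, with hypothetical addresses:

	docker run -d --net=host --restart=always \
	  -e CP_HOSTS=10.0.0.1,10.0.0.2 \
	  husseingalal/nginx-nodeporxy:dev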
@@ -45,6 +45,7 @@ func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService)
 		Image: schedulerService.Image,
 		Cmd: []string{"/hyperkube",
 			"scheduler",
+			"--leader-elect=true",
 			"--v=2",
 			"--address=0.0.0.0",
 			"--kubeconfig=" + pki.KubeSchedulerConfigPath,
@@ -54,6 +55,7 @@ func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService)
 		Binds: []string{
 			"/etc/kubernetes:/etc/kubernetes",
 		},
+		NetworkMode:   "host",
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
 	for arg, value := range schedulerService.ExtraArgs {
@@ -16,12 +16,13 @@ const (
 	KubeControllerContainerName = "kube-controller"
 	SchedulerContainerName      = "scheduler"
 	EtcdContainerName           = "etcd"
+	NginxProxyContainerName     = "nginx-proxy"
 )
 
 func GetKubernetesServiceIP(serviceClusterRange string) (net.IP, error) {
 	ip, ipnet, err := net.ParseCIDR(serviceClusterRange)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to get kubernetes service IP: %v", err)
+		return nil, fmt.Errorf("Failed to get kubernetes service IP from Kube API option [service_cluster_ip_range]: %v", err)
 	}
 	ip = ip.Mask(ipnet.Mask)
 	for j := len(ip) - 1; j >= 0; j-- {
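GetKubernetesServiceIP masks the parsed address with the CIDR mask and, in the loop continuing past this hunk, increments it to the first usable address, so the result is the .1 of the service range. A hedged example, assuming a typical range:

	ip, _ := GetKubernetesServiceIP("10.233.0.0/18") // 10.233.0.1, the ClusterIP given to the default kubernetes service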
@@ -3,7 +3,6 @@ package services
 import (
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/k8s"
-	"github.com/rancher/rke/pki"
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 )
@@ -22,8 +21,13 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS
 		}
 	}
 	for _, host := range workerHosts {
+		// run nginx proxy
+		err := runNginxProxy(host, controlHosts)
+		if err != nil {
+			return err
+		}
 		// run kubelet
-		err := runKubelet(host, workerServices.Kubelet, false)
+		err = runKubelet(host, workerServices.Kubelet, false)
 		if err != nil {
 			return err
 		}
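Note the switch from err := runKubelet(...) to err = runKubelet(...): with the nginx-proxy block now declaring err earlier in the same loop body, a second short variable declaration of err alone would not compile ("no new variables on left side of :=").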
@@ -37,9 +41,9 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS
 	return nil
 }
 
-func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices) error {
+func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices, localConfigPath string) error {
 	logrus.Infof("[%s] Upgrading Worker Plane..", WorkerRole)
-	k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
+	k8sClient, err := k8s.NewClient(localConfigPath)
 	if err != nil {
 		return err
 	}