mirror of https://github.com/rancher/rke.git synced 2025-07-07 04:18:44 +00:00

Merge pull request #15 from moelsayed/cluster_upgrade

Add cluster upgrade
Commit 9e28d35656 by Hussein Galal, 2017-11-16 01:59:59 +02:00, committed by GitHub
17 changed files with 523 additions and 79 deletions
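
In short: this PR adds a "cluster upgrade" subcommand. The upgrade first verifies that all nodes are Ready, fetches the current cluster state and reuses its certificates, then performs a rolling replacement of the control plane services (kube-api, kube-controller, scheduler) and the worker plane services (kubelet, kube-proxy), cordoning each node while its containers are swapped. New Docker helpers (stop, rename, start, create, inspect, wait) implement the container replacement, and the k8s utility functions are split out into configmap.go, node.go and secret.go.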

View File

@@ -11,13 +11,13 @@ network:
foo: bar
hosts:
- hostname: server1
- advertised_hostname: server1
ip: 1.1.1.1
user: ubuntu
role: [controlplane, etcd]
docker_socket: /var/run/docker.sock
advertise_address: 10.1.1.1
- hostname: server2
- advertised_hostname: server2
ip: 2.2.2.2
user: ubuntu
role: [worker]

View File

@@ -16,10 +16,7 @@ func SetUpAuthentication(kubeCluster, currentCluster *Cluster) error {
if kubeCluster.Authentication.Strategy == X509AuthenticationProvider {
var err error
if currentCluster != nil {
kubeCluster.Certificates, err = getClusterCerts(kubeCluster.KubeClient)
if err != nil {
return fmt.Errorf("Failed to Get Kubernetes certificates: %v", err)
}
kubeCluster.Certificates = currentCluster.Certificates
} else {
kubeCluster.Certificates, err = pki.StartCertificatesGeneration(
kubeCluster.ControlPlaneHosts,

View File

@@ -41,6 +41,10 @@ func (c *Cluster) GetClusterState() (*Cluster, error) {
// Handle previous Kubernetes state and certificate generation
currentCluster = getStateFromKubernetes(c.KubeClient, pki.KubeAdminConfigPath)
if currentCluster != nil {
currentCluster.Certificates, err = getClusterCerts(c.KubeClient)
if err != nil {
return nil, fmt.Errorf("Failed to Get Kubernetes certificates: %v", err)
}
err = currentCluster.InvertIndexHosts()
if err != nil {
return nil, fmt.Errorf("Failed to classify hosts from fetched cluster: %v", err)

cluster/upgrade.go (new file, 51 lines)

@@ -0,0 +1,51 @@
package cluster
import (
"fmt"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
"github.com/sirupsen/logrus"
)
func (c *Cluster) ClusterUpgrade() error {
// make sure all nodes are Ready
logrus.Debugf("[upgrade] Checking node status")
if err := checkK8sNodesState(); err != nil {
return err
}
// upgrade Control Plane
logrus.Infof("[upgrade] Upgrading Control Plane Services")
if err := services.UpgradeControlPlane(c.ControlPlaneHosts, c.EtcdHosts, c.Services); err != nil {
return err
}
logrus.Infof("[upgrade] Control Plane Services updgraded successfully")
// upgrade Worker Plane
logrus.Infof("[upgrade] Upgrading Worker Plane Services")
if err := services.UpgradeWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services); err != nil {
return err
}
logrus.Infof("[upgrade] Worker Plane Services updgraded successfully")
return nil
}
func checkK8sNodesState() error {
k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
if err != nil {
return err
}
nodeList, err := k8s.GetNodeList(k8sClient)
if err != nil {
return err
}
for _, node := range nodeList.Items {
ready := k8s.IsNodeReady(node)
if !ready {
return fmt.Errorf("[upgrade] Node: %s is NotReady", node.Name)
}
}
logrus.Infof("[upgrade] All nodes are Ready")
return nil
}
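
ClusterUpgrade is deliberately sequential: the control plane on every host is upgraded first, and only then the worker plane. checkK8sNodesState acts as a preflight gate, so a cluster with any NotReady node is never touched.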

View File

@@ -22,6 +22,14 @@ func ClusterCommand() cli.Command {
EnvVar: "CLUSTER_FILE",
},
}
clusterUpgradeFlags := []cli.Flag{
cli.StringFlag{
Name: "cluster-file",
Usage: "Specify an upgraded cluster YAML file",
Value: "cluster.yml",
EnvVar: "CLUSTER_FILE",
},
}
return cli.Command{
Name: "cluster",
ShortName: "cluster",
@@ -40,6 +48,12 @@ func ClusterCommand() cli.Command {
Action: getClusterVersion,
Flags: []cli.Flag{},
},
cli.Command{
Name: "upgrade",
Usage: "Upgrade Cluster Kubernetes version",
Action: clusterUpgradeFromCli,
Flags: clusterUpgradeFlags,
},
},
}
}
@@ -66,7 +80,6 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SetUpHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
@@ -136,3 +149,51 @@ func getClusterVersion(ctx *cli.Context) error {
fmt.Printf("Server Version: %s\n", serverVersion)
return nil
}
func clusterUpgradeFromCli(ctx *cli.Context) error {
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
_, _, _, _, err = ClusterUpgrade(clusterFile)
return err
}
func ClusterUpgrade(clusterFile string) (string, string, string, string, error) {
logrus.Infof("Upgrading Kubernetes cluster")
var APIURL, caCrt, clientCert, clientKey string
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Debugf("Getting current cluster")
currentCluster, err := kubeCluster.GetClusterState()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Debugf("Setting up upgrade tunnels")
/*
kubeCluster is the cluster definition parsed from cluster.yml; it carries the updated configuration.
currentCluster is the current state fetched from Kubernetes.
Copying currentCluster's certificates into kubeCluster gives it both the latest configuration and the credentials it needs to reach the cluster and apply the upgrade.
*/
kubeCluster.Certificates = currentCluster.Certificates
err = kubeCluster.TunnelHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Debugf("Starting cluster upgrade")
err = kubeCluster.ClusterUpgrade()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Infof("Cluster upgraded successfully")
APIURL = fmt.Sprintf("https://%s:6443", kubeCluster.ControlPlaneHosts[0].IP)
caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
return APIURL, caCrt, clientCert, clientKey, nil
}
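
Given these definitions, the upgrade would be invoked as "rke cluster upgrade --cluster-file cluster.yml" (assuming the binary is named rke). Since cluster.yml is the flag's default and CLUSTER_FILE is honored from the environment, a bare "rke cluster upgrade" run from the directory containing cluster.yml does the same thing.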

View File

@@ -77,3 +77,65 @@ func RemoveContainer(dClient *client.Client, hostname string, containerName stri
}
return nil
}
func StopContainer(dClient *client.Client, hostname string, containerName string) error {
err := dClient.ContainerStop(context.Background(), containerName, nil)
if err != nil {
return fmt.Errorf("Can't stop Docker container %s for host [%s]: %v", containerName, hostname, err)
}
return nil
}
func RenameContainer(dClient *client.Client, hostname string, oldContainerName string, newContainerName string) error {
err := dClient.ContainerRename(context.Background(), oldContainerName, newContainerName)
if err != nil {
return fmt.Errorf("Can't rename Docker container %s for host [%s]: %v", oldContainerName, hostname, err)
}
return nil
}
func StartContainer(dClient *client.Client, hostname string, containerName string) error {
if err := dClient.ContainerStart(context.Background(), containerName, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, hostname, err)
}
return nil
}
func CreateContainer(dClient *client.Client, hostname string, containerName string, imageCfg *container.Config, hostCfg *container.HostConfig) (container.ContainerCreateCreatedBody, error) {
created, err := dClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, containerName)
if err != nil {
return container.ContainerCreateCreatedBody{}, fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, hostname, err)
}
return created, nil
}
func InspectContainer(dClient *client.Client, hostname string, containerName string) (types.ContainerJSON, error) {
inspection, err := dClient.ContainerInspect(context.Background(), containerName)
if err != nil {
return types.ContainerJSON{}, fmt.Errorf("Failed to inspect %s container on host [%s]: %v", containerName, hostname, err)
}
return inspection, nil
}
func StopRenameContainer(dClient *client.Client, hostname string, oldContainerName string, newContainerName string) error {
if err := StopContainer(dClient, hostname, oldContainerName); err != nil {
return err
}
if err := WaitForContainer(dClient, oldContainerName); err != nil {
return err
}
err := RenameContainer(dClient, hostname, oldContainerName, newContainerName)
return err
}
func WaitForContainer(dClient *client.Client, containerName string) error {
statusCh, errCh := dClient.ContainerWait(context.Background(), containerName, container.WaitConditionNotRunning)
select {
case err := <-errCh:
if err != nil {
return fmt.Errorf("Error wating for container [%s]: %v", containerName, err)
}
case <-statusCh:
}
return nil
}
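
These helpers are the building blocks for the per-service upgrades later in this commit: stop the running container, wait for it to exit, rename it aside, start a replacement under the original name, then remove the renamed copy. A minimal sketch of that composition, assuming a Docker daemon reachable via NewEnvClient; replaceContainer and the nginx image are hypothetical and not part of this commit (the services recreate their containers through their own run helpers rather than calling CreateContainer directly):

package main

import (
"log"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/rancher/rke/docker"
)

func replaceContainer(dClient *client.Client, hostname, name string, imageCfg *container.Config, hostCfg *container.HostConfig) error {
oldName := "old-" + name
// Stop the current container, wait until it exits, and rename it aside.
if err := docker.StopRenameContainer(dClient, hostname, name, oldName); err != nil {
return err
}
// Create and start the replacement under the original name.
if _, err := docker.CreateContainer(dClient, hostname, name, imageCfg, hostCfg); err != nil {
return err
}
if err := docker.StartContainer(dClient, hostname, name); err != nil {
return err
}
// Only once the new container is up do we drop the old one.
return docker.RemoveContainer(dClient, hostname, oldName)
}

func main() {
dClient, err := client.NewEnvClient()
if err != nil {
log.Fatal(err)
}
imageCfg := &container.Config{Image: "nginx:1.13"} // hypothetical image, for illustration only
if err := replaceContainer(dClient, "server1", "web", imageCfg, &container.HostConfig{}); err != nil {
log.Fatal(err)
}
}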

k8s/configmap.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package k8s
import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func UpdateConfigMap(k8sClient *kubernetes.Clientset, configYaml []byte, configMapName string) error {
cfgMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Namespace: metav1.NamespaceSystem,
},
Data: map[string]string{
configMapName: string(configYaml),
},
}
if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(cfgMap); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(cfgMap); err != nil {
return err
}
}
return nil
}
func GetConfigMap(k8sClient *kubernetes.Clientset, configMapName string) (*v1.ConfigMap, error) {
return k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
}

View File

@@ -1,9 +1,6 @@
package k8s
import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
@@ -20,72 +17,3 @@ func NewClient(kubeConfigPath string) (*kubernetes.Clientset, error) {
}
return K8sClientSet, nil
}
func UpdateConfigMap(k8sClient *kubernetes.Clientset, configYaml []byte, configMapName string) error {
cfgMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Namespace: metav1.NamespaceSystem,
},
Data: map[string]string{
configMapName: string(configYaml),
},
}
if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(cfgMap); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(cfgMap); err != nil {
return err
}
}
return nil
}
func GetConfigMap(k8sClient *kubernetes.Clientset, configMapName string) (*v1.ConfigMap, error) {
return k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
}
func UpdateSecret(k8sClient *kubernetes.Clientset, fieldName string, secretData []byte, secretName string) error {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Data: map[string][]byte{
fieldName: secretData,
},
}
if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
// update the secret if it already exists
oldSecret, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
if err != nil {
return err
}
newData := oldSecret.Data
newData[fieldName] = secretData
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Data: newData,
}
if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err != nil {
return err
}
}
return nil
}
func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) {
return k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
}
func DeleteNode(k8sClient *kubernetes.Clientset, nodeName string) error {
return k8sClient.CoreV1().Nodes().Delete(nodeName, &metav1.DeleteOptions{})
}

k8s/node.go (new file, 54 lines)

@@ -0,0 +1,54 @@
package k8s
import (
"fmt"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func DeleteNode(k8sClient *kubernetes.Clientset, nodeName string) error {
return k8sClient.CoreV1().Nodes().Delete(nodeName, &metav1.DeleteOptions{})
}
func GetNodeList(k8sClient *kubernetes.Clientset) (*v1.NodeList, error) {
return k8sClient.CoreV1().Nodes().List(metav1.ListOptions{})
}
func CordonUncordon(k8sClient *kubernetes.Clientset, nodeName string, cordoned bool) error {
updated := false
for retries := 0; retries <= 5; retries++ {
node, err := k8sClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
logrus.Debugf("Error getting node %s: %v", nodeName, err)
continue
}
if node.Spec.Unschedulable == cordoned {
logrus.Debugf("Node %s is already cordoned: %v", nodeName, cordoned)
return nil
}
node.Spec.Unschedulable = cordoned
_, err = k8sClient.CoreV1().Nodes().Update(node)
if err != nil {
logrus.Debugf("Error setting cordoned state for node %s: %v", nodeName, err)
continue
}
updated = true
break
}
if !updated {
return fmt.Errorf("Failed to set cordoned state for node: %s", nodeName)
}
return nil
}
func IsNodeReady(node v1.Node) bool {
nodeConditions := node.Status.Conditions
for _, condition := range nodeConditions {
if condition.Type == "Ready" && condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
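
Two notes on these helpers: CordonUncordon retries up to six times because a node update can fail transiently (for example on an update conflict), and flipping Spec.Unschedulable is the same operation kubectl cordon/uncordon performs. IsNodeReady scans Status.Conditions for the Ready condition and treats anything other than ConditionTrue (False or Unknown) as not ready, which is what makes checkK8sNodesState in cluster/upgrade.go refuse to start an upgrade while any node is NotReady.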

k8s/secret.go (new file, 47 lines)

@@ -0,0 +1,47 @@
package k8s
import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) {
return k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
}
func UpdateSecret(k8sClient *kubernetes.Clientset, fieldName string, secretData []byte, secretName string) error {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Data: map[string][]byte{
fieldName: secretData,
},
}
if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
// update the secret if it already exists
oldSecret, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
if err != nil {
return err
}
newData := oldSecret.Data
newData[fieldName] = secretData
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: metav1.NamespaceSystem,
},
Data: newData,
}
if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err != nil {
return err
}
}
return nil
}
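
Note the asymmetry with UpdateConfigMap above: on AlreadyExists the config map is simply overwritten, while the secret path first Gets the existing object and merges the new field into its Data map, so unrelated fields in a shared secret survive the update.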

View File

@@ -28,3 +28,26 @@ func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlS
logrus.Infof("[%s] Successfully started Controller Plane..", ControlRole)
return nil
}
func UpgradeControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlServices v1.RKEConfigServices) error {
logrus.Infof("[%s] Upgrading the Controller Plane..", ControlRole)
for _, host := range controlHosts {
// upgrade KubeAPI
if err := upgradeKubeAPI(host, etcdHosts, controlServices.KubeAPI); err != nil {
return err
}
// upgrade KubeController
if err := upgradeKubeController(host, controlServices.KubeController); err != nil {
return err
}
// upgrade scheduler
err := upgradeScheduler(host, controlServices.Scheduler)
if err != nil {
return err
}
}
logrus.Infof("[%s] Successfully upgraded Controller Plane..", ControlRole)
return nil
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
@@ -17,6 +18,31 @@ func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeA
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
logrus.Debugf("[upgrade/KubeAPI] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeAPIContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeAPIService.Image {
logrus.Infof("[upgrade/KubeAPI] KubeAPI is already up to date")
return nil
}
logrus.Debugf("[upgrade/KubeAPI] Stopping old container")
oldContainerName := "old-" + KubeAPIContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeAPIContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now, let's deploy it!
logrus.Debugf("[upgrade/KubeAPI] Deploying new container")
if err := runKubeAPI(host, etcdHosts, kubeAPIService); err != nil {
return err
}
logrus.Debugf("[upgrade/KubeAPI] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeAPIService.Image,

View File

@@ -8,6 +8,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
@@ -15,6 +16,32 @@ func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerS
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
logrus.Debugf("[upgrade/KubeController] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeControllerContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeControllerService.Image {
logrus.Infof("[upgrade/KubeController] KubeController is already up to date")
return nil
}
logrus.Debugf("[upgrade/KubeController] Stopping old container")
oldContainerName := "old-" + KubeControllerContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeControllerContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now, let's deploy it!
logrus.Debugf("[upgrade/KubeController] Deploying new container")
if err := runKubeController(host, kubeControllerService); err != nil {
return err
}
logrus.Debugf("[upgrade/KubeController] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeControllerService.Image,

View File

@@ -9,6 +9,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
@@ -16,6 +17,31 @@ func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.AdvertisedHostname, WorkerRole)
}
func upgradeKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
logrus.Debugf("[upgrade/Kubelet] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeletService.Image {
logrus.Infof("[upgrade/Kubelet] Kubelet is already up to date")
return nil
}
logrus.Debugf("[upgrade/Kubelet] Stopping old container")
oldContainerName := "old-" + KubeletContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now, let's deploy it!
logrus.Debugf("[upgrade/Kubelet] Deploying new container")
if err := runKubelet(host, kubeletService, isMaster); err != nil {
return err
}
logrus.Debugf("[upgrade/Kubelet] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeletService.Image,

View File

@@ -8,6 +8,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
@@ -15,6 +16,30 @@ func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.AdvertisedHostname, WorkerRole)
}
func upgradeKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
logrus.Debugf("[upgrade/Kubeproxy] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeproxyService.Image {
logrus.Infof("[upgrade/Kubeproxy] Kubeproxy is already up to date")
return nil
}
logrus.Debugf("[upgrade/Kubeproxy] Stopping old container")
oldContainerName := "old-" + KubeproxyContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now, let's deploy it!
logrus.Debugf("[upgrade/Kubeproxy] Deploying new container")
if err := runKubeproxy(host, kubeproxyService); err != nil {
return err
}
logrus.Debugf("[upgrade/Kubeproxy] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeproxyService.Image,

View File

@@ -8,12 +8,37 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService)
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
logrus.Debugf("[upgrade/Scheduler] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == schedulerService.Image {
logrus.Infof("[upgrade/Scheduler] Scheduler is already up to date")
return nil
}
logrus.Debugf("[upgrade/Scheduler] Stopping old container")
oldContainerName := "old-" + SchedulerContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now, let's deploy it!
logrus.Debugf("[upgrade/Scheduler] Deploying new container")
if err := runScheduler(host, schedulerService); err != nil {
return err
}
logrus.Debugf("[upgrade/Scheduler] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{

View File

@@ -2,6 +2,8 @@ package services
import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
@@ -34,3 +36,55 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS
logrus.Infof("[%s] Successfully started Worker Plane..", WorkerRole)
return nil
}
func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices) error {
logrus.Infof("[%s] Upgrading Worker Plane..", WorkerRole)
k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
if err != nil {
return err
}
for _, host := range controlHosts {
// cordon the node
logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil {
return err
}
err = upgradeKubelet(host, workerServices.Kubelet, true)
if err != nil {
return err
}
err = upgradeKubeproxy(host, workerServices.Kubeproxy)
if err != nil {
return err
}
logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil {
return err
}
}
for _, host := range workerHosts {
// cordon the node
logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil {
return err
}
// upgrade kubelet
err := upgradeKubelet(host, workerServices.Kubelet, false)
if err != nil {
return err
}
// upgrade kubeproxy
err = upgradeKubeproxy(host, workerServices.Kubeproxy)
if err != nil {
return err
}
logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil {
return err
}
}
logrus.Infof("[%s] Successfully upgraded Worker Plane..", WorkerRole)
return nil
}