diff --git a/cluster.yml b/cluster.yml
index 0f8549e6..45ecb561 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -11,13 +11,13 @@ network:
     foo: bar
 
 hosts:
-  - hostname: server1
+  - advertised_hostname: server1
     ip: 1.1.1.1
     user: ubuntu
     role: [controlplane, etcd]
     docker_socket: /var/run/docker.sock
     advertise_address: 10.1.1.1
-  - hostname: server2
+  - advertised_hostname: server2
    ip: 2.2.2.2
    user: ubuntu
    role: [worker]
diff --git a/cluster/certificates.go b/cluster/certificates.go
index 4ece271c..d1c8d780 100644
--- a/cluster/certificates.go
+++ b/cluster/certificates.go
@@ -16,10 +16,7 @@ func SetUpAuthentication(kubeCluster, currentCluster *Cluster) error {
 	if kubeCluster.Authentication.Strategy == X509AuthenticationProvider {
 		var err error
 		if currentCluster != nil {
-			kubeCluster.Certificates, err = getClusterCerts(kubeCluster.KubeClient)
-			if err != nil {
-				return fmt.Errorf("Failed to Get Kubernetes certificates: %v", err)
-			}
+			kubeCluster.Certificates = currentCluster.Certificates
 		} else {
 			kubeCluster.Certificates, err = pki.StartCertificatesGeneration(
 				kubeCluster.ControlPlaneHosts,
diff --git a/cluster/state.go b/cluster/state.go
index 7355e0d5..552396a5 100644
--- a/cluster/state.go
+++ b/cluster/state.go
@@ -41,6 +41,10 @@ func (c *Cluster) GetClusterState() (*Cluster, error) {
 	// Handle pervious kubernetes state and certificate generation
 	currentCluster = getStateFromKubernetes(c.KubeClient, pki.KubeAdminConfigPath)
 	if currentCluster != nil {
+		currentCluster.Certificates, err = getClusterCerts(c.KubeClient)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to get Kubernetes certificates: %v", err)
+		}
 		err = currentCluster.InvertIndexHosts()
 		if err != nil {
 			return nil, fmt.Errorf("Failed to classify hosts from fetched cluster: %v", err)
diff --git a/cluster/upgrade.go b/cluster/upgrade.go
new file mode 100644
index 00000000..a0f1a12c
--- /dev/null
+++ b/cluster/upgrade.go
@@ -0,0 +1,54 @@
+package cluster
+
+import (
+	"fmt"
+
+	"github.com/rancher/rke/k8s"
+	"github.com/rancher/rke/pki"
+	"github.com/rancher/rke/services"
+	"github.com/sirupsen/logrus"
+)
+
+func (c *Cluster) ClusterUpgrade() error {
+	// make sure all nodes are Ready before starting the upgrade
+	logrus.Debugf("[upgrade] Checking node status")
+	if err := checkK8sNodesState(); err != nil {
+		return err
+	}
+	// upgrade Control Plane
+	logrus.Infof("[upgrade] Upgrading Control Plane Services")
+	if err := services.UpgradeControlPlane(c.ControlPlaneHosts, c.EtcdHosts, c.Services); err != nil {
+		return err
+	}
+	logrus.Infof("[upgrade] Control Plane Services upgraded successfully")
+
+	// upgrade Worker Plane
+	logrus.Infof("[upgrade] Upgrading Worker Plane Services")
+	if err := services.UpgradeWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services); err != nil {
+		return err
+	}
+	logrus.Infof("[upgrade] Worker Plane Services upgraded successfully")
+	return nil
+}
+
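+// checkK8sNodesState lists the cluster's nodes through the admin kubeconfig
+// and fails fast if any node is not Ready, so an upgrade never starts
+// against a degraded cluster.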
+func checkK8sNodesState() error {
+	k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath)
+	if err != nil {
+		return err
+	}
+	nodeList, err := k8s.GetNodeList(k8sClient)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodeList.Items {
+		ready := k8s.IsNodeReady(node)
+		if !ready {
+			return fmt.Errorf("[upgrade] Node: %s is NotReady", node.Name)
+		}
+	}
+	logrus.Infof("[upgrade] All nodes are Ready")
+	return nil
+}
diff --git a/cmd/cluster.go b/cmd/cluster.go
index 2633ac02..6f18a620 100644
--- a/cmd/cluster.go
+++ b/cmd/cluster.go
@@ -22,6 +22,14 @@ func ClusterCommand() cli.Command {
 			EnvVar: "CLUSTER_FILE",
 		},
 	}
+	clusterUpgradeFlags := []cli.Flag{
+		cli.StringFlag{
+			Name:   "cluster-file",
+			Usage:  "Specify an upgraded cluster YAML file",
+			Value:  "cluster.yml",
+			EnvVar: "CLUSTER_FILE",
+		},
+	}
 	return cli.Command{
 		Name:      "cluster",
 		ShortName: "cluster",
@@ -40,6 +48,12 @@
 				Action: getClusterVersion,
 				Flags:  []cli.Flag{},
 			},
+			cli.Command{
+				Name:   "upgrade",
+				Usage:  "Upgrade Cluster Kubernetes version",
+				Action: clusterUpgradeFromCli,
+				Flags:  clusterUpgradeFlags,
+			},
 		},
 	}
 }
@@ -66,7 +80,6 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, err
 	}
-
 	err = kubeCluster.SetUpHosts()
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, err
@@ -136,3 +149,53 @@ func getClusterVersion(ctx *cli.Context) error {
 	fmt.Printf("Server Version: %s\n", serverVersion)
 	return nil
 }
+
+func clusterUpgradeFromCli(ctx *cli.Context) error {
+	clusterFile, err := resolveClusterFile(ctx)
+	if err != nil {
+		return fmt.Errorf("Failed to resolve cluster file: %v", err)
+	}
+	_, _, _, _, err = ClusterUpgrade(clusterFile)
+	return err
+}
+
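+// ClusterUpgrade reconciles the cluster.yml definition against the running
+// cluster: it reuses the current cluster's certificates, tunnels to the
+// hosts, and rolls the control plane and worker plane to the new images.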
return fmt.Errorf("Can't rename Docker container %s for host [%s]: %v", oldContainerName, hostname, err) + } + return nil +} + +func StartContainer(dClient *client.Client, hostname string, containerName string) error { + if err := dClient.ContainerStart(context.Background(), containerName, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, hostname, err) + } + return nil +} + +func CreateContiner(dClient *client.Client, hostname string, containerName string, imageCfg *container.Config, hostCfg *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + created, err := dClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, containerName) + if err != nil { + return container.ContainerCreateCreatedBody{}, fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, hostname, err) + } + return created, nil +} + +func InspectContainer(dClient *client.Client, hostname string, containerName string) (types.ContainerJSON, error) { + inspection, err := dClient.ContainerInspect(context.Background(), containerName) + if err != nil { + return types.ContainerJSON{}, fmt.Errorf("Failed to inspect %s container on host [%s]: %v", containerName, hostname, err) + } + return inspection, nil +} + +func StopRenameContainer(dClient *client.Client, hostname string, oldContainerName string, newContainerName string) error { + if err := StopContainer(dClient, hostname, oldContainerName); err != nil { + return err + } + if err := WaitForContainer(dClient, oldContainerName); err != nil { + return nil + } + err := RenameContainer(dClient, hostname, oldContainerName, newContainerName) + return err +} + +func WaitForContainer(dClient *client.Client, containerName string) error { + statusCh, errCh := dClient.ContainerWait(context.Background(), containerName, container.WaitConditionNotRunning) + select { + case err := <-errCh: + if err != nil { + return fmt.Errorf("Error wating for container [%s]: %v", containerName, err) + } + case <-statusCh: + } + return nil +} diff --git a/k8s/configmap.go b/k8s/configmap.go new file mode 100644 index 00000000..0398a95b --- /dev/null +++ b/k8s/configmap.go @@ -0,0 +1,34 @@ +package k8s + +import ( + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func UpdateConfigMap(k8sClient *kubernetes.Clientset, configYaml []byte, configMapName string) error { + cfgMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + configMapName: string(configYaml), + }, + } + + if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(cfgMap); err != nil { + if !apierrors.IsAlreadyExists(err) { + return err + } + if _, err := k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(cfgMap); err != nil { + return err + } + } + return nil +} + +func GetConfigMap(k8sClient *kubernetes.Clientset, configMapName string) (*v1.ConfigMap, error) { + return k8sClient.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) +} diff --git a/k8s/k8s.go b/k8s/k8s.go index dd5f9001..e2210185 100644 --- a/k8s/k8s.go +++ b/k8s/k8s.go @@ -1,9 +1,6 @@ package k8s import ( - "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" ) @@ -20,72 
+func StopRenameContainer(dClient *client.Client, hostname string, oldContainerName string, newContainerName string) error {
+	if err := StopContainer(dClient, hostname, oldContainerName); err != nil {
+		return err
+	}
+	if err := WaitForContainer(dClient, oldContainerName); err != nil {
+		return err
+	}
+	err := RenameContainer(dClient, hostname, oldContainerName, newContainerName)
+	return err
+}
+
+func WaitForContainer(dClient *client.Client, containerName string) error {
+	statusCh, errCh := dClient.ContainerWait(context.Background(), containerName, container.WaitConditionNotRunning)
+	select {
+	case err := <-errCh:
+		if err != nil {
+			return fmt.Errorf("Error waiting for container [%s]: %v", containerName, err)
+		}
+	case <-statusCh:
+	}
+	return nil
+}
diff --git a/k8s/configmap.go b/k8s/configmap.go
new file mode 100644
index 00000000..0398a95b
--- /dev/null
+++ b/k8s/configmap.go
@@ -0,0 +1,36 @@
+package k8s
+
+import (
+	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
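+// UpdateConfigMap stores configYaml in a ConfigMap in the kube-system
+// namespace, creating it first and falling back to an update if it already exists.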
fmt.Errorf("Failed to set cordonded state for node: %s", nodeName) + } + return nil +} + +func IsNodeReady(node v1.Node) bool { + nodeConditions := node.Status.Conditions + for _, condition := range nodeConditions { + if condition.Type == "Ready" && condition.Status == v1.ConditionTrue { + return true + } + } + return false +} diff --git a/k8s/secret.go b/k8s/secret.go new file mode 100644 index 00000000..16b59164 --- /dev/null +++ b/k8s/secret.go @@ -0,0 +1,47 @@ +package k8s + +import ( + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) { + return k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) +} + +func UpdateSecret(k8sClient *kubernetes.Clientset, fieldName string, secretData []byte, secretName string) error { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string][]byte{ + fieldName: secretData, + }, + } + if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret); err != nil { + if !apierrors.IsAlreadyExists(err) { + return err + } + // update secret if its already exist + oldSecret, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) + if err != nil { + return err + } + newData := oldSecret.Data + newData[fieldName] = secretData + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: metav1.NamespaceSystem, + }, + Data: newData, + } + if _, err := k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Update(secret); err != nil { + return err + } + } + return nil +} diff --git a/services/controlplane.go b/services/controlplane.go index 354aa1a0..a04f00be 100644 --- a/services/controlplane.go +++ b/services/controlplane.go @@ -28,3 +28,26 @@ func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlS logrus.Infof("[%s] Successfully started Controller Plane..", ControlRole) return nil } + +func UpgradeControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlServices v1.RKEConfigServices) error { + logrus.Infof("[%s] Upgrading the Controller Plane..", ControlRole) + for _, host := range controlHosts { + // upgrade KubeAPI + if err := upgradeKubeAPI(host, etcdHosts, controlServices.KubeAPI); err != nil { + return err + } + + // upgrade KubeController + if err := upgradeKubeController(host, controlServices.KubeController); err != nil { + return nil + } + + // upgrade scheduler + err := upgradeScheduler(host, controlServices.Scheduler) + if err != nil { + return err + } + } + logrus.Infof("[%s] Successfully upgraded Controller Plane..", ControlRole) + return nil +} diff --git a/services/kubeapi.go b/services/kubeapi.go index 55b4d3b8..6fbd2851 100644 --- a/services/kubeapi.go +++ b/services/kubeapi.go @@ -9,6 +9,7 @@ import ( "github.com/rancher/rke/hosts" "github.com/rancher/rke/pki" "github.com/rancher/types/apis/cluster.cattle.io/v1" + "github.com/sirupsen/logrus" ) func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error { @@ -17,6 +18,31 @@ func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeA return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole) } +func upgradeKubeAPI(host hosts.Host, etcdHosts []hosts.Host, 
+func CordonUncordon(k8sClient *kubernetes.Clientset, nodeName string, cordoned bool) error {
+	updated := false
+	for retries := 0; retries <= 5; retries++ {
+		node, err := k8sClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		if err != nil {
+			logrus.Debugf("Error getting node %s: %v", nodeName, err)
+			continue
+		}
+		if node.Spec.Unschedulable == cordoned {
+			logrus.Debugf("Node %s is already cordoned: %v", nodeName, cordoned)
+			return nil
+		}
+		node.Spec.Unschedulable = cordoned
+		_, err = k8sClient.CoreV1().Nodes().Update(node)
+		if err != nil {
+			logrus.Debugf("Error setting cordoned state for node %s: %v", nodeName, err)
+			continue
+		}
+		updated = true
+		break
+	}
+	if !updated {
+		return fmt.Errorf("Failed to set cordoned state for node: %s", nodeName)
+	}
+	return nil
+}
+
+func IsNodeReady(node v1.Node) bool {
+	nodeConditions := node.Status.Conditions
+	for _, condition := range nodeConditions {
+		if condition.Type == "Ready" && condition.Status == v1.ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
diff --git a/k8s/secret.go b/k8s/secret.go
new file mode 100644
index 00000000..16b59164
--- /dev/null
+++ b/k8s/secret.go
@@ -0,0 +1,49 @@
+package k8s
+
+import (
+	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) {
+	return k8sClient.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
+}
+
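+// UpdateSecret creates the named secret in kube-system, or merges the new
+// field into the existing secret's data when it already exists.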
+ logrus.Debugf("[upgrade/KubeController] Deploying new container") + if err := runKubeController(host, kubeControllerService); err != nil { + return err + } + logrus.Debugf("[upgrade/KubeController] Removing old container") + err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName) + return err + +} + func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (*container.Config, *container.HostConfig) { imageCfg := &container.Config{ Image: kubeControllerService.Image, diff --git a/services/kubelet.go b/services/kubelet.go index 012c8480..25934d52 100644 --- a/services/kubelet.go +++ b/services/kubelet.go @@ -9,6 +9,7 @@ import ( "github.com/rancher/rke/hosts" "github.com/rancher/rke/pki" "github.com/rancher/types/apis/cluster.cattle.io/v1" + "github.com/sirupsen/logrus" ) func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error { @@ -16,6 +17,31 @@ func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.AdvertisedHostname, WorkerRole) } +func upgradeKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error { + logrus.Debugf("[upgrade/Kubelet] Checking for deployed version") + containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName) + if err != nil { + return err + } + if containerInspect.Config.Image == kubeletService.Image { + logrus.Infof("[upgrade/Kubelet] Kubelet is already up to date") + return nil + } + logrus.Debugf("[upgrade/Kubelet] Stopping old container") + oldContainerName := "old-" + KubeletContainerName + if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName, oldContainerName); err != nil { + return err + } + // Container doesn't exist now!, lets deploy it! 
+ logrus.Debugf("[upgrade/Kubelet] Deploying new container") + if err := runKubelet(host, kubeletService, isMaster); err != nil { + return err + } + logrus.Debugf("[upgrade/Kubelet] Removing old container") + err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName) + return err +} + func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) (*container.Config, *container.HostConfig) { imageCfg := &container.Config{ Image: kubeletService.Image, diff --git a/services/kubeproxy.go b/services/kubeproxy.go index 66d5f289..0d3bd8a1 100644 --- a/services/kubeproxy.go +++ b/services/kubeproxy.go @@ -8,6 +8,7 @@ import ( "github.com/rancher/rke/hosts" "github.com/rancher/rke/pki" "github.com/rancher/types/apis/cluster.cattle.io/v1" + "github.com/sirupsen/logrus" ) func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error { @@ -15,6 +16,30 @@ func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error { return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.AdvertisedHostname, WorkerRole) } +func upgradeKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error { + logrus.Debugf("[upgrade/Kubeproxy] Checking for deployed version") + containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName) + if err != nil { + return err + } + if containerInspect.Config.Image == kubeproxyService.Image { + logrus.Infof("[upgrade/Kubeproxy] Kubeproxy is already up to date") + return nil + } + logrus.Debugf("[upgrade/Kubeproxy] Stopping old container") + oldContainerName := "old-" + KubeproxyContainerName + if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName, oldContainerName); err != nil { + return err + } + // Container doesn't exist now!, lets deploy it! 
+ logrus.Debugf("[upgrade/Kubeproxy] Deploying new container") + if err := runKubeproxy(host, kubeproxyService); err != nil { + return err + } + logrus.Debugf("[upgrade/Kubeproxy] Removing old container") + err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName) + return err +} func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) { imageCfg := &container.Config{ Image: kubeproxyService.Image, diff --git a/services/scheduler.go b/services/scheduler.go index dbb1caef..aaea0b67 100644 --- a/services/scheduler.go +++ b/services/scheduler.go @@ -8,12 +8,37 @@ import ( "github.com/rancher/rke/hosts" "github.com/rancher/rke/pki" "github.com/rancher/types/apis/cluster.cattle.io/v1" + "github.com/sirupsen/logrus" ) func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error { imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService) return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole) } +func upgradeScheduler(host hosts.Host, schedulerService v1.SchedulerService) error { + logrus.Debugf("[upgrade/Scheduler] Checking for deployed version") + containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName) + if err != nil { + return err + } + if containerInspect.Config.Image == schedulerService.Image { + logrus.Infof("[upgrade/Scheduler] Scheduler is already up to date") + return nil + } + logrus.Debugf("[upgrade/Scheduler] Stopping old container") + oldContainerName := "old-" + SchedulerContainerName + if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName, oldContainerName); err != nil { + return err + } + // Container doesn't exist now!, lets deploy it! 
+ logrus.Debugf("[upgrade/Scheduler] Deploying new container") + if err := runScheduler(host, schedulerService); err != nil { + return err + } + logrus.Debugf("[upgrade/Scheduler] Removing old container") + err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName) + return err +} func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) (*container.Config, *container.HostConfig) { imageCfg := &container.Config{ diff --git a/services/workerplane.go b/services/workerplane.go index a8d547b7..6e0852a0 100644 --- a/services/workerplane.go +++ b/services/workerplane.go @@ -2,6 +2,8 @@ package services import ( "github.com/rancher/rke/hosts" + "github.com/rancher/rke/k8s" + "github.com/rancher/rke/pki" "github.com/rancher/types/apis/cluster.cattle.io/v1" "github.com/sirupsen/logrus" ) @@ -34,3 +36,55 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS logrus.Infof("[%s] Successfully started Worker Plane..", WorkerRole) return nil } + +func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices) error { + logrus.Infof("[%s] Upgrading Worker Plane..", WorkerRole) + k8sClient, err := k8s.NewClient(pki.KubeAdminConfigPath) + if err != nil { + return err + } + for _, host := range controlHosts { + // cordone the node + logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname) + if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil { + return err + } + err = upgradeKubelet(host, workerServices.Kubelet, true) + if err != nil { + return err + } + err = upgradeKubeproxy(host, workerServices.Kubeproxy) + if err != nil { + return err + } + + logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname) + if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil { + return err + } + } + for _, host := range workerHosts { + // cordone the node + logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname) + if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil { + return err + } + // upgrade kubelet + err := upgradeKubelet(host, workerServices.Kubelet, false) + if err != nil { + return err + } + // upgrade kubeproxy + err = upgradeKubeproxy(host, workerServices.Kubeproxy) + if err != nil { + return err + } + + logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname) + if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil { + return err + } + } + logrus.Infof("[%s] Successfully upgraded Worker Plane..", WorkerRole) + return nil +}