
Command overhaul

remove cluster command
merge upgrade and up commands
rename down command
moelsayed 2017-11-28 13:26:15 +02:00
parent 8f77e50465
commit 07a1441826
17 changed files with 297 additions and 569 deletions
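
Taken together, the new top-level commands replace the old nested rke cluster subcommands. Below is a minimal sketch of the resulting wiring, based on the main.go and cmd/*.go changes in this commit; the app name and the exit handling are assumptions, not part of the diff:

package main

import (
	"os"

	"github.com/rancher/rke/cmd"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "rke" // assumed; the real main.go sets more metadata than shown here
	app.Commands = []cli.Command{
		cmd.UpCommand(),      // absorbs the old "cluster up" and "cluster upgrade"
		cmd.RemoveCommand(),  // renamed from "cluster down"
		cmd.VersionCommand(), // was "cluster version"
		cmd.ConfigCommand(),  // unchanged by this commit
	}
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}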

View File

@@ -5,7 +5,7 @@ import (
"github.com/rancher/rke/services"
)
func (c *Cluster) ClusterDown() error {
func (c *Cluster) ClusterRemove() error {
// Remove Worker Plane
if err := services.RemoveWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts); err != nil {
return err

View File

@@ -1,67 +0,0 @@
package cluster
import (
"fmt"
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/services"
"github.com/sirupsen/logrus"
)
func (c *Cluster) ClusterUpgrade() error {
// make sure all nodes are Ready
logrus.Debugf("[upgrade] Checking node status")
if err := checkK8sNodesState(c.LocalKubeConfigPath); err != nil {
return err
}
// upgrade Control Plane
logrus.Infof("[upgrade] Upgrading Control Plane Services")
if err := services.UpgradeControlPlane(c.ControlPlaneHosts, c.EtcdHosts, c.Services); err != nil {
return err
}
logrus.Infof("[upgrade] Control Plane Services updgraded successfully")
// upgrade Worker Plane
logrus.Infof("[upgrade] Upgrading Worker Plane Services")
if err := services.UpgradeWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services, c.LocalKubeConfigPath); err != nil {
return err
}
logrus.Infof("[upgrade] Worker Plane Services updgraded successfully")
return nil
}
func checkK8sNodesState(localConfigPath string) error {
k8sClient, err := k8s.NewClient(localConfigPath)
if err != nil {
return err
}
nodeList, err := k8s.GetNodeList(k8sClient)
if err != nil {
return err
}
for _, node := range nodeList.Items {
ready := k8s.IsNodeReady(node)
if !ready {
return fmt.Errorf("[upgrade] Node: %s is NotReady", node.Name)
}
}
logrus.Infof("[upgrade] All nodes are Ready")
return nil
}
func CheckHostsChangedOnUpgrade(kubeCluster, currentCluster *Cluster) error {
etcdChanged := hosts.IsHostListChanged(currentCluster.EtcdHosts, kubeCluster.EtcdHosts)
if etcdChanged {
return fmt.Errorf("Adding or removing Etcd nodes while upgrade is not supported")
}
cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
if cpChanged {
return fmt.Errorf("Adding or removing Control plane nodes while upgrade is not supported")
}
workerChanged := hosts.IsHostListChanged(currentCluster.WorkerHosts, kubeCluster.WorkerHosts)
if workerChanged {
return fmt.Errorf("Adding or removing Worker plane nodes while upgrade is not supported")
}
return nil
}

View File

@@ -1,286 +0,0 @@
package cmd
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/rancher/rke/cluster"
"github.com/rancher/rke/pki"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"k8s.io/client-go/util/cert"
)
func ClusterCommand() cli.Command {
clusterUpDownUpgradeFlags := []cli.Flag{
cli.StringFlag{
Name: "cluster-file",
Usage: "Specify an alternate cluster YAML file",
Value: cluster.DefaultClusterConfig,
EnvVar: "CLUSTER_FILE",
},
}
clusterForceRemove := []cli.Flag{
cli.BoolFlag{
Name: "force",
Usage: "Force removal of the cluster",
},
}
return cli.Command{
Name: "cluster",
ShortName: "cluster",
Usage: "Operations on the cluster",
Flags: clusterUpDownUpgradeFlags,
Subcommands: []cli.Command{
cli.Command{
Name: "up",
Usage: "Bring the cluster up",
Action: clusterUpFromCli,
Flags: clusterUpDownUpgradeFlags,
},
cli.Command{
Name: "down",
Usage: "Teardown the cluster and clean cluster nodes",
Action: clusterDownFromCli,
Flags: append(clusterUpDownUpgradeFlags, clusterForceRemove...),
},
cli.Command{
Name: "version",
Usage: "Show Cluster Kubernetes version",
Action: getClusterVersion,
Flags: clusterUpDownUpgradeFlags,
},
cli.Command{
Name: "upgrade",
Usage: "Upgrade Cluster Kubernetes version",
Action: clusterUpgradeFromCli,
Flags: clusterUpDownUpgradeFlags,
},
},
}
}
func ClusterUp(clusterFile string) (string, string, string, string, error) {
logrus.Infof("Building Kubernetes cluster")
var APIURL, caCrt, clientCert, clientKey string
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.TunnelHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
currentCluster, err := kubeCluster.GetClusterState()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
if err := cluster.CheckEtcdHostsChanged(kubeCluster, currentCluster); err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = cluster.SetUpAuthentication(kubeCluster, currentCluster)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SetUpHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployClusterPlanes()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = cluster.ReconcileCluster(kubeCluster, currentCluster)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SaveClusterState(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployNetworkPlugin()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployK8sAddOns()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployUserAddOns()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
logrus.Infof("Finished building Kubernetes cluster successfully")
return APIURL, caCrt, clientCert, clientKey, nil
}
func ClusterUpgrade(clusterFile string) (string, string, string, string, error) {
logrus.Infof("Upgrading Kubernetes cluster")
var APIURL, caCrt, clientCert, clientKey string
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Debugf("Getting current cluster")
currentCluster, err := kubeCluster.GetClusterState()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
if currentCluster == nil {
return APIURL, caCrt, clientCert, clientKey, fmt.Errorf("Failed to get the current state of Kubernetes cluster")
}
// check if user try to add/remove hosts during upgrade
if err := cluster.CheckHostsChangedOnUpgrade(kubeCluster, currentCluster); err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
/*
kubeCluster is the cluster.yaml definition. It should have updated configuration
currentCluster is the current state fetched from kubernetes
we add currentCluster certs to kubeCluster, kubeCluster would have the latest configuration from cluster.yaml and the certs to connect to k8s and apply the upgrade
*/
kubeCluster.Certificates = currentCluster.Certificates
logrus.Debugf("Setting up upgrade tunnels")
err = kubeCluster.TunnelHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Debugf("Starting cluster upgrade")
err = kubeCluster.ClusterUpgrade()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SaveClusterState(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
logrus.Infof("Cluster upgraded successfully")
APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
return APIURL, caCrt, clientCert, clientKey, nil
}
func ClusterDown(clusterFile string) error {
logrus.Infof("Tearing down Kubernetes cluster")
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return err
}
err = kubeCluster.TunnelHosts()
if err != nil {
return err
}
logrus.Debugf("Starting Cluster removal")
err = kubeCluster.ClusterDown()
if err != nil {
return err
}
logrus.Infof("Cluster removed successfully")
return nil
}
func clusterUpFromCli(ctx *cli.Context) error {
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
_, _, _, _, err = ClusterUp(clusterFile)
return err
}
func clusterUpgradeFromCli(ctx *cli.Context) error {
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
_, _, _, _, err = ClusterUpgrade(clusterFile)
return err
}
func clusterDownFromCli(ctx *cli.Context) error {
force := ctx.Bool("force")
if !force {
reader := bufio.NewReader(os.Stdin)
fmt.Printf("Are you sure you want to remove Kubernetes cluster [y/n]: ")
input, err := reader.ReadString('\n')
input = strings.TrimSpace(input)
if err != nil {
return err
}
if input != "y" && input != "Y" {
return nil
}
}
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
return ClusterDown(clusterFile)
}
func resolveClusterFile(ctx *cli.Context) (string, error) {
clusterFile := ctx.String("cluster-file")
fp, err := filepath.Abs(clusterFile)
if err != nil {
return "", fmt.Errorf("failed to lookup current directory name: %v", err)
}
file, err := os.Open(fp)
if err != nil {
return "", fmt.Errorf("Can not find cluster configuration file: %v", err)
}
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err != nil {
return "", fmt.Errorf("failed to read file: %v", err)
}
clusterFileBuff := string(buf)
/*
This is a hacky way to add config path to cluster object without messing with
ClusterUp function and to avoid conflict with calls from kontainer-engine, basically
i add config path (cluster.yml by default) to a field into the config buffer
to be parsed later and added as ConfigPath field into cluster object.
*/
clusterFileBuff = fmt.Sprintf("%s\nconfig_path: %s\n", clusterFileBuff, clusterFile)
return clusterFileBuff, nil
}
func getClusterVersion(ctx *cli.Context) error {
localKubeConfig := cluster.GetLocalKubeConfig(ctx.String("cluster-file"))
serverVersion, err := cluster.GetK8sVersion(localKubeConfig)
if err != nil {
return err
}
fmt.Printf("Server Version: %s\n", serverVersion)
return nil
}

cmd/common.go Normal file (+37 lines)
View File

@@ -0,0 +1,37 @@
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/urfave/cli"
)
func resolveClusterFile(ctx *cli.Context) (string, error) {
clusterFile := ctx.String("config")
fp, err := filepath.Abs(clusterFile)
if err != nil {
return "", fmt.Errorf("failed to lookup current directory name: %v", err)
}
file, err := os.Open(fp)
if err != nil {
return "", fmt.Errorf("Can not find cluster configuration file: %v", err)
}
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err != nil {
return "", fmt.Errorf("failed to read file: %v", err)
}
clusterFileBuff := string(buf)
/*
This is a hacky way to add config path to cluster object without messing with
ClusterUp function and to avoid conflict with calls from kontainer-engine, basically
i add config path (cluster.yml by default) to a field into the config buffer
to be parsed later and added as ConfigPath field into cluster object.
*/
clusterFileBuff = fmt.Sprintf("%s\nconfig_path: %s\n", clusterFileBuff, clusterFile)
return clusterFileBuff, nil
}
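
The config_path line appended above works because the cluster file is plain YAML: tacking one more top-level key onto the buffer makes the path visible when the cluster object is unmarshalled later. A toy illustration of the idea follows; toyCluster and the yaml.v2 dependency are stand-ins for this sketch only and are not part of the RKE code:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// toyCluster is a hypothetical stand-in for the real cluster type; only the
// config_path tag matters for this illustration.
type toyCluster struct {
	ConfigPath string `yaml:"config_path"`
}

func main() {
	buf := "nodes: []\n"
	// Mirror what resolveClusterFile does: append the path as one more YAML key.
	buf = fmt.Sprintf("%s\nconfig_path: %s\n", buf, "cluster.yml")

	var c toyCluster
	if err := yaml.Unmarshal([]byte(buf), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ConfigPath) // prints: cluster.yml
}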

View File

@@ -179,7 +179,7 @@ func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigHost, error) {
}
host.User = sshUser
dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socker path on host (%s)", advertisedHostname), "/var/run/docker.sock")
dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socket path on host (%s)", advertisedHostname), "/var/run/docker.sock")
if err != nil {
return nil, err
}

cmd/remove.go Normal file (+76 lines)
View File

@@ -0,0 +1,76 @@
package cmd
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/rancher/rke/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
func RemoveCommand() cli.Command {
removeFlags := []cli.Flag{
cli.StringFlag{
Name: "config",
Usage: "Specify an alternate cluster YAML file",
Value: cluster.DefaultClusterConfig,
EnvVar: "RKE_CONFIG",
},
cli.BoolFlag{
Name: "force",
Usage: "Force removal of the cluster",
},
}
return cli.Command{
Name: "remove",
Usage: "Teardown the cluster and clean cluster nodes",
Action: clusterRemoveFromCli,
Flags: removeFlags,
}
}
func ClusterRemove(clusterFile string) error {
logrus.Infof("Tearing down Kubernetes cluster")
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return err
}
err = kubeCluster.TunnelHosts()
if err != nil {
return err
}
logrus.Debugf("Starting Cluster removal")
err = kubeCluster.ClusterRemove()
if err != nil {
return err
}
logrus.Infof("Cluster removed successfully")
return nil
}
func clusterRemoveFromCli(ctx *cli.Context) error {
force := ctx.Bool("force")
if !force {
reader := bufio.NewReader(os.Stdin)
fmt.Printf("Are you sure you want to remove Kubernetes cluster [y/n]: ")
input, err := reader.ReadString('\n')
input = strings.TrimSpace(input)
if err != nil {
return err
}
if input != "y" && input != "Y" {
return nil
}
}
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
return ClusterRemove(clusterFile)
}

cmd/up.go Normal file (+108 lines)
View File

@@ -0,0 +1,108 @@
package cmd
import (
"fmt"
"github.com/rancher/rke/cluster"
"github.com/rancher/rke/pki"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"k8s.io/client-go/util/cert"
)
func UpCommand() cli.Command {
upFlags := []cli.Flag{
cli.StringFlag{
Name: "config",
Usage: "Specify an alternate cluster YAML file",
Value: cluster.DefaultClusterConfig,
EnvVar: "RKE_CONFIG",
},
}
return cli.Command{
Name: "up",
Usage: "Bring the cluster up",
Action: clusterUpFromCli,
Flags: upFlags,
}
}
func ClusterUp(clusterFile string) (string, string, string, string, error) {
logrus.Infof("Building Kubernetes cluster")
var APIURL, caCrt, clientCert, clientKey string
kubeCluster, err := cluster.ParseConfig(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.TunnelHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
currentCluster, err := kubeCluster.GetClusterState()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
if err := cluster.CheckEtcdHostsChanged(kubeCluster, currentCluster); err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = cluster.SetUpAuthentication(kubeCluster, currentCluster)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SetUpHosts()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployClusterPlanes()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = cluster.ReconcileCluster(kubeCluster, currentCluster)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SaveClusterState(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployNetworkPlugin()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployK8sAddOns()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployUserAddOns()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
logrus.Infof("Finished building Kubernetes cluster successfully")
return APIURL, caCrt, clientCert, clientKey, nil
}
func clusterUpFromCli(ctx *cli.Context) error {
clusterFile, err := resolveClusterFile(ctx)
if err != nil {
return fmt.Errorf("Failed to resolve cluster file: %v", err)
}
_, _, _, _, err = ClusterUp(clusterFile)
return err
}
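
ClusterUp keeps the same exported signature as before, returning the API endpoint and the admin credentials so callers other than the CLI (the comments in cmd/common.go mention kontainer-engine) can drive it directly. A hedged usage sketch; reading the file yourself and what you do with the returned PEM strings are assumptions, not part of this commit:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/rancher/rke/cmd"
)

func main() {
	// ClusterUp takes the cluster YAML as a string, not a file path
	// (see resolveClusterFile above).
	buf, err := ioutil.ReadFile("cluster.yml")
	if err != nil {
		panic(err)
	}
	apiURL, caCrt, clientCert, clientKey, err := cmd.ClusterUp(string(buf))
	if err != nil {
		panic(err)
	}
	fmt.Println("API endpoint:", apiURL)
	// caCrt, clientCert and clientKey are PEM-encoded strings; a caller would
	// typically assemble them into a kubeconfig, which is not shown here.
	_, _, _ = caCrt, clientCert, clientKey
}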

cmd/version.go Normal file (+35 lines)
View File

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"github.com/rancher/rke/cluster"
"github.com/urfave/cli"
)
func VersionCommand() cli.Command {
versionFlags := []cli.Flag{
cli.StringFlag{
Name: "config",
Usage: "Specify an alternate cluster YAML file",
Value: cluster.DefaultClusterConfig,
EnvVar: "RKE_CONFIG",
},
}
return cli.Command{
Name: "version",
Usage: "Show cluster Kubernetes version",
Action: getClusterVersion,
Flags: versionFlags,
}
}
func getClusterVersion(ctx *cli.Context) error {
localKubeConfig := cluster.GetLocalKubeConfig(ctx.String("config"))
serverVersion, err := cluster.GetK8sVersion(localKubeConfig)
if err != nil {
return err
}
fmt.Printf("Server Version: %s\n", serverVersion)
return nil
}
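
cluster.GetK8sVersion itself is not part of this diff. A plausible shape for it, sketched with client-go's discovery client; everything below is an assumption about code outside this commit, including the kubeconfig path:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// getK8sVersion sketches what cluster.GetK8sVersion might do: load the local
// kubeconfig and ask the API server for its version.
func getK8sVersion(kubeConfigPath string) (string, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	if err != nil {
		return "", err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return "", err
	}
	info, err := client.Discovery().ServerVersion()
	if err != nil {
		return "", err
	}
	return info.GitVersion, nil
}

func main() {
	v, err := getK8sVersion(".kube_config_cluster.yml") // path is an assumption
	if err != nil {
		panic(err)
	}
	fmt.Printf("Server Version: %s\n", v)
}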

View File

@@ -20,9 +20,17 @@ func DoRunContainer(dClient *client.Client, imageCfg *container.Config, hostCfg
}
if isRunning {
logrus.Infof("[%s] Container %s is already running on host [%s]", plane, containerName, hostname)
isUpgradable, err := IsContainerUpgradable(dClient, imageCfg, containerName, hostname, plane)
if err != nil {
return err
}
if isUpgradable {
return DoRollingUpdateContainer(dClient, imageCfg, hostCfg, containerName, hostname, plane)
}
return nil
}
logrus.Debugf("[%s] Pulling Image on host [%s]", plane, hostname)
logrus.Infof("[%s] Pulling Image on host [%s]", plane, hostname)
err = PullImage(dClient, hostname, imageCfg.Image)
if err != nil {
return err
@@ -50,6 +58,12 @@ func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config
logrus.Infof("[%s] Container %s is not running on host [%s]", plane, containerName, hostname)
return nil
}
logrus.Infof("[%s] Pulling Image on host [%s]", plane, hostname)
err = PullImage(dClient, hostname, imageCfg.Image)
if err != nil {
return err
}
logrus.Infof("[%s] Successfully pulled %s image on host [%s]", plane, containerName, hostname)
logrus.Debugf("[%s] Stopping old container", plane)
oldContainerName := "old-" + containerName
if err := StopRenameContainer(dClient, hostname, containerName, oldContainerName); err != nil {
@@ -70,28 +84,28 @@ func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config
}
func DoRemoveContainer(dClient *client.Client, containerName, hostname string) error {
logrus.Infof("[down/%s] Checking if container is running on host [%s]", containerName, hostname)
logrus.Infof("[remove/%s] Checking if container is running on host [%s]", containerName, hostname)
// not using the wrapper to check if the error is a NotFound error
_, err := dClient.ContainerInspect(context.Background(), containerName)
if err != nil {
if client.IsErrNotFound(err) {
logrus.Infof("[down/%s] Container doesn't exist on host [%s]", containerName, hostname)
logrus.Infof("[remove/%s] Container doesn't exist on host [%s]", containerName, hostname)
return nil
}
return err
}
logrus.Infof("[down/%s] Stopping container on host [%s]", containerName, hostname)
logrus.Infof("[remove/%s] Stopping container on host [%s]", containerName, hostname)
err = StopContainer(dClient, hostname, containerName)
if err != nil {
return err
}
logrus.Infof("[down/%s] Removing container on host [%s]", containerName, hostname)
logrus.Infof("[remove/%s] Removing container on host [%s]", containerName, hostname)
err = RemoveContainer(dClient, hostname, containerName)
if err != nil {
return err
}
logrus.Infof("[down/%s] Sucessfully removed container on host [%s]", containerName, hostname)
logrus.Infof("[remove/%s] Sucessfully removed container on host [%s]", containerName, hostname)
return nil
}
@@ -194,3 +208,19 @@ func WaitForContainer(dClient *client.Client, containerName string) error {
}
return nil
}
func IsContainerUpgradable(dClient *client.Client, imageCfg *container.Config, containerName string, hostname string, plane string) (bool, error) {
logrus.Debugf("[%s] Checking if container %s is eligible for upgrade on host [%s]", plane, containerName, hostname)
// this should be moved to a higher layer.
containerInspect, err := InspectContainer(dClient, hostname, containerName)
if err != nil {
return false, err
}
if containerInspect.Config.Image == imageCfg.Image {
logrus.Debugf("[%s] Container %s is not eligible for updgrade on host [%s]", plane, containerName, hostname)
return false, nil
}
logrus.Debugf("[%s] Container %s is eligible for updgrade on host [%s]", plane, containerName, hostname)
return true, nil
}
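
This change is what lets a plain up run double as an upgrade: when a container is already running, DoRunContainer asks IsContainerUpgradable whether the desired image differs from the running one and, if so, hands off to DoRollingUpdateContainer instead of returning early. That is also why the per-service upgrade functions are deleted in the files below. A condensed, self-contained sketch of the decision (image names are placeholders):

package main

import "fmt"

// decideAction mirrors the branch DoRunContainer now takes: create the
// container if it is not running, roll it when the desired image differs from
// the running one, and otherwise do nothing.
func decideAction(isRunning bool, runningImage, desiredImage string) string {
	if !isRunning {
		return "create"
	}
	if runningImage != desiredImage {
		return "rolling-update"
	}
	return "noop"
}

func main() {
	fmt.Println(decideAction(false, "", "example/kube-api:v1.8.3"))                       // create
	fmt.Println(decideAction(true, "example/kube-api:v1.8.2", "example/kube-api:v1.8.3")) // rolling-update
	fmt.Println(decideAction(true, "example/kube-api:v1.8.3", "example/kube-api:v1.8.3")) // noop
}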

View File

@@ -30,7 +30,9 @@ func mainErr() error {
app.Author = "Rancher Labs, Inc."
app.Email = ""
app.Commands = []cli.Command{
cmd.ClusterCommand(),
cmd.UpCommand(),
cmd.RemoveCommand(),
cmd.VersionCommand(),
cmd.ConfigCommand(),
}
app.Flags = []cli.Flag{

View File

@@ -29,29 +29,6 @@ func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlS
return nil
}
func UpgradeControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlServices v1.RKEConfigServices) error {
logrus.Infof("[%s] Upgrading the Controller Plane..", ControlRole)
for _, host := range controlHosts {
// upgrade KubeAPI
if err := upgradeKubeAPI(host, etcdHosts, controlServices.KubeAPI); err != nil {
return err
}
// upgrade KubeController
if err := upgradeKubeController(host, controlServices.KubeController); err != nil {
return nil
}
// upgrade scheduler
err := upgradeScheduler(host, controlServices.Scheduler)
if err != nil {
return err
}
}
logrus.Infof("[%s] Successfully upgraded Controller Plane..", ControlRole)
return nil
}
func RemoveControlPlane(controlHosts []hosts.Host) error {
logrus.Infof("[%s] Tearing down the Controller Plane..", ControlRole)
for _, host := range controlHosts {

View File

@@ -9,7 +9,6 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
@@ -18,31 +17,6 @@ func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeA
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
logrus.Debugf("[upgrade/KubeAPI] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeAPIContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeAPIService.Image {
logrus.Infof("[upgrade/KubeAPI] KubeAPI is already up to date")
return nil
}
logrus.Debugf("[upgrade/KubeAPI] Stopping old container")
oldContainerName := "old-" + KubeAPIContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeAPIContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now!, lets deploy it!
logrus.Debugf("[upgrade/KubeAPI] Deploying new container")
if err := runKubeAPI(host, etcdHosts, kubeAPIService); err != nil {
return err
}
logrus.Debugf("[upgrade/KubeAPI] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func removeKubeAPI(host hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.AdvertisedHostname)
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
@@ -16,32 +15,6 @@ func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerS
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
logrus.Debugf("[upgrade/KubeController] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeControllerContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeControllerService.Image {
logrus.Infof("[upgrade/KubeController] KubeController is already up to date")
return nil
}
logrus.Debugf("[upgrade/KubeController] Stopping old container")
oldContainerName := "old-" + KubeControllerContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeControllerContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now!, lets deploy it!
logrus.Debugf("[upgrade/KubeController] Deploying new container")
if err := runKubeController(host, kubeControllerService); err != nil {
return err
}
logrus.Debugf("[upgrade/KubeController] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func removeKubeController(host hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.AdvertisedHostname)
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
@@ -17,31 +16,6 @@ func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.AdvertisedHostname, WorkerRole)
}
func upgradeKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
logrus.Debugf("[upgrade/Kubelet] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeletService.Image {
logrus.Infof("[upgrade/Kubelet] Kubelet is already up to date")
return nil
}
logrus.Debugf("[upgrade/Kubelet] Stopping old container")
oldContainerName := "old-" + KubeletContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeletContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now!, lets deploy it!
logrus.Debugf("[upgrade/Kubelet] Deploying new container")
if err := runKubelet(host, kubeletService, isMaster); err != nil {
return err
}
logrus.Debugf("[upgrade/Kubelet] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func removeKubelet(host hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeletContainerName, host.AdvertisedHostname)
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
@@ -16,31 +15,6 @@ func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.AdvertisedHostname, WorkerRole)
}
func upgradeKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
logrus.Debugf("[upgrade/Kubeproxy] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == kubeproxyService.Image {
logrus.Infof("[upgrade/Kubeproxy] Kubeproxy is already up to date")
return nil
}
logrus.Debugf("[upgrade/Kubeproxy] Stopping old container")
oldContainerName := "old-" + KubeproxyContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, KubeproxyContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now!, lets deploy it!
logrus.Debugf("[upgrade/Kubeproxy] Deploying new container")
if err := runKubeproxy(host, kubeproxyService); err != nil {
return err
}
logrus.Debugf("[upgrade/Kubeproxy] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func removeKubeproxy(host hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeproxyContainerName, host.AdvertisedHostname)
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
@@ -16,31 +15,6 @@ func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole)
}
func upgradeScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
logrus.Debugf("[upgrade/Scheduler] Checking for deployed version")
containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName)
if err != nil {
return err
}
if containerInspect.Config.Image == schedulerService.Image {
logrus.Infof("[upgrade/Scheduler] Scheduler is already up to date")
return nil
}
logrus.Debugf("[upgrade/Scheduler] Stopping old container")
oldContainerName := "old-" + SchedulerContainerName
if err := docker.StopRenameContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName, oldContainerName); err != nil {
return err
}
// Container doesn't exist now!, lets deploy it!
logrus.Debugf("[upgrade/Scheduler] Deploying new container")
if err := runScheduler(host, schedulerService); err != nil {
return err
}
logrus.Debugf("[upgrade/Scheduler] Removing old container")
err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
return err
}
func removeScheduler(host hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, SchedulerContainerName, host.AdvertisedHostname)
}

View File

@@ -2,7 +2,6 @@ package services
import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/k8s"
"github.com/rancher/types/apis/cluster.cattle.io/v1"
"github.com/sirupsen/logrus"
)
@@ -41,58 +40,6 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS
return nil
}
func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices, localConfigPath string) error {
logrus.Infof("[%s] Upgrading Worker Plane..", WorkerRole)
k8sClient, err := k8s.NewClient(localConfigPath)
if err != nil {
return err
}
for _, host := range controlHosts {
// cordon the node
logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil {
return err
}
err = upgradeKubelet(host, workerServices.Kubelet, true)
if err != nil {
return err
}
err = upgradeKubeproxy(host, workerServices.Kubeproxy)
if err != nil {
return err
}
logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil {
return err
}
}
for _, host := range workerHosts {
// cordon the node
logrus.Debugf("[upgrade] Cordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, true); err != nil {
return err
}
// upgrade kubelet
err := upgradeKubelet(host, workerServices.Kubelet, false)
if err != nil {
return err
}
// upgrade kubeproxy
err = upgradeKubeproxy(host, workerServices.Kubeproxy)
if err != nil {
return err
}
logrus.Debugf("[upgrade] Uncordoning node: %s", host.AdvertisedHostname)
if err = k8s.CordonUncordon(k8sClient, host.AdvertisedHostname, false); err != nil {
return err
}
}
logrus.Infof("[%s] Successfully upgraded Worker Plane..", WorkerRole)
return nil
}
func RemoveWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host) error {
logrus.Infof("[%s] Tearing down Worker Plane..", WorkerRole)
for _, host := range controlHosts {