Mirror of https://github.com/rancher/rke.git
Merge pull request #154 from galal-hussein/dialer_factory
Add Dialer Factory and receive rkeConfig instead of cluster yaml
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -20,7 +20,7 @@ import (
 
 type Cluster struct {
 	v3.RancherKubernetesEngineConfig `yaml:",inline"`
-	ConfigPath          string `yaml:"config_path"`
+	ConfigPath          string
 	LocalKubeConfigPath string
 	EtcdHosts           []*hosts.Host
 	WorkerHosts         []*hosts.Host
@@ -31,7 +31,7 @@ type Cluster struct {
 	ClusterDomain    string
 	ClusterCIDR      string
 	ClusterDNSServer string
-	Dialer           hosts.Dialer
+	DialerFactory    hosts.DialerFactory
 }
 
 const (
@@ -80,21 +80,30 @@ func (c *Cluster) DeployClusterPlanes() error {
 	return nil
 }
 
-func ParseConfig(clusterFile string, customDialer hosts.Dialer) (*Cluster, error) {
+func ParseConfig(clusterFile string) (*v3.RancherKubernetesEngineConfig, error) {
 	logrus.Debugf("Parsing cluster file [%v]", clusterFile)
-	var err error
-	c, err := parseClusterFile(clusterFile)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to parse the cluster file: %v", err)
+	var rkeConfig v3.RancherKubernetesEngineConfig
+	if err := yaml.Unmarshal([]byte(clusterFile), &rkeConfig); err != nil {
+		return nil, err
 	}
-	c.Dialer = customDialer
-	err = c.InvertIndexHosts()
-	if err != nil {
+	return &rkeConfig, nil
+}
+
+func ParseCluster(rkeConfig *v3.RancherKubernetesEngineConfig, clusterFilePath string, dialerFactory hosts.DialerFactory) (*Cluster, error) {
+	var err error
+	c := &Cluster{
+		RancherKubernetesEngineConfig: *rkeConfig,
+		ConfigPath:                    clusterFilePath,
+		DialerFactory:                 dialerFactory,
+	}
+	// Setting cluster Defaults
+	c.setClusterDefaults()
+
+	if err := c.InvertIndexHosts(); err != nil {
 		return nil, fmt.Errorf("Failed to classify hosts from config file: %v", err)
 	}
 
-	err = c.ValidateCluster()
-	if err != nil {
+	if err := c.ValidateCluster(); err != nil {
 		return nil, fmt.Errorf("Failed to validate cluster: %v", err)
 	}
 
@@ -112,19 +121,6 @@ func ParseConfig(clusterFile string, customDialer hosts.Dialer) (*Cluster, error
 	return c, nil
 }
 
-func parseClusterFile(clusterFile string) (*Cluster, error) {
-	// parse hosts
-	var kubeCluster Cluster
-	err := yaml.Unmarshal([]byte(clusterFile), &kubeCluster)
-	if err != nil {
-		return nil, err
-	}
-	// Setting cluster Defaults
-	kubeCluster.setClusterDefaults()
-
-	return &kubeCluster, nil
-}
-
 func (c *Cluster) setClusterDefaults() {
 	if len(c.SSHKeyPath) == 0 {
 		c.SSHKeyPath = DefaultClusterSSHKeyPath
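Taken together, the cluster.go hunks split the old one-shot ParseConfig into two steps: ParseConfig now only unmarshals the YAML into a v3.RancherKubernetesEngineConfig, and the new ParseCluster builds the Cluster from that config while injecting a hosts.DialerFactory. A minimal caller-side sketch of the new flow follows; the cluster.yml path, the zero-value factory, and the main wrapper are illustrative assumptions, and only the two function signatures come from this diff:

package main

import (
	"io/ioutil"
	"log"

	"github.com/rancher/rke/cluster"
	"github.com/rancher/rke/hosts"
)

func main() {
	// Hypothetical config location; any YAML matching the RKE schema works.
	data, err := ioutil.ReadFile("cluster.yml")
	if err != nil {
		log.Fatal(err)
	}

	// Step 1: YAML -> plain RancherKubernetesEngineConfig; no host setup yet.
	rkeConfig, err := cluster.ParseConfig(string(data))
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: build the Cluster, injecting whichever DialerFactory the
	// caller wants. A zero value keeps the sketch short; a real caller
	// would pass an SSH-based (or other) factory.
	var factory hosts.DialerFactory
	kubeCluster, err := cluster.ParseCluster(rkeConfig, "cluster.yml", factory)
	if err != nil {
		log.Fatal(err)
	}
	_ = kubeCluster
}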
--- a/cluster/hosts.go
+++ b/cluster/hosts.go
@@ -11,18 +11,18 @@ import (
 
 func (c *Cluster) TunnelHosts() error {
 	for i := range c.EtcdHosts {
-		if err := c.EtcdHosts[i].TunnelUp(); err != nil {
+		if err := c.EtcdHosts[i].TunnelUp(c.DialerFactory); err != nil {
 			return fmt.Errorf("Failed to set up SSH tunneling for Etcd host [%s]: %v", c.EtcdHosts[i].Address, err)
 		}
 	}
 	for i := range c.ControlPlaneHosts {
-		err := c.ControlPlaneHosts[i].TunnelUp()
+		err := c.ControlPlaneHosts[i].TunnelUp(c.DialerFactory)
 		if err != nil {
 			return fmt.Errorf("Failed to set up SSH tunneling for Control host [%s]: %v", c.ControlPlaneHosts[i].Address, err)
 		}
 	}
	for i := range c.WorkerHosts {
-		if err := c.WorkerHosts[i].TunnelUp(); err != nil {
+		if err := c.WorkerHosts[i].TunnelUp(c.DialerFactory); err != nil {
 			return fmt.Errorf("Failed to set up SSH tunneling for Worker host [%s]: %v", c.WorkerHosts[i].Address, err)
 		}
 	}
@@ -37,9 +37,6 @@ func (c *Cluster) InvertIndexHosts() error {
 			newHost := hosts.Host{
 				RKEConfigNode: host,
 			}
-			if err := newHost.RegisterDialer(c.Dialer); err != nil {
-				return fmt.Errorf("Failed to register new Dialer for host [%s]: %v", host.Address, err)
-			}
 
 			newHost.IgnoreDockerVersion = c.IgnoreDockerVersion
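With RegisterDialer removed from InvertIndexHosts, dialer wiring moves to the point of use: each TunnelUp call now receives the cluster's DialerFactory, and reconcileHost below takes it as a parameter, presumably so different callers (the CLI versus an embedding server) can supply different transports. The hosts.DialerFactory definition is not part of this diff, so the following self-contained sketch only illustrates the factory pattern, using hypothetical stand-in types rather than the real ones from the hosts package:

package main

import (
	"fmt"
	"net"
)

// Stand-ins for the real hosts package types (assumptions, not from the diff).
type Host struct{ Address string }

// A Dialer opens connections to a remote host.
type Dialer interface {
	Dial(network, address string) (net.Conn, error)
}

// A DialerFactory builds a per-host Dialer on demand. Injecting the factory,
// rather than registering one Dialer per host up front, is the shape of the
// change this PR makes to Cluster, TunnelUp, and reconcileHost.
type DialerFactory func(h *Host) (Dialer, error)

type tcpDialer struct{ host *Host }

func (d *tcpDialer) Dial(network, address string) (net.Conn, error) {
	// Plain TCP straight to the host for illustration; real factories might
	// tunnel over SSH to reach the Docker socket instead.
	return net.Dial(network, d.host.Address+":2376")
}

func tcpFactory(h *Host) (Dialer, error) {
	if h.Address == "" {
		return nil, fmt.Errorf("host has no address")
	}
	return &tcpDialer{host: h}, nil
}

func main() {
	dialer, err := tcpFactory(&Host{Address: "10.0.0.1"})
	if err != nil {
		panic(err)
	}
	_ = dialer
}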
--- a/cluster/reconcile.go
+++ b/cluster/reconcile.go
@@ -43,7 +43,7 @@ func reconcileWorker(currentCluster, kubeCluster *Cluster, kubeClient *kubernete
 			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
-		if err := reconcileHost(toDeleteHost, true, currentCluster.SystemImages[AplineImage]); err != nil {
+		if err := reconcileHost(toDeleteHost, true, currentCluster.SystemImages[AplineImage], currentCluster.DialerFactory); err != nil {
 			logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
 			continue
 		}
@@ -75,7 +75,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
 			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
-		if err := reconcileHost(toDeleteHost, false, currentCluster.SystemImages[AplineImage]); err != nil {
+		if err := reconcileHost(toDeleteHost, false, currentCluster.SystemImages[AplineImage], currentCluster.DialerFactory); err != nil {
 			logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
 			continue
 		}
@@ -96,8 +96,8 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
 	return nil
 }
 
-func reconcileHost(toDeleteHost *hosts.Host, worker bool, cleanerImage string) error {
-	if err := toDeleteHost.TunnelUp(); err != nil {
+func reconcileHost(toDeleteHost *hosts.Host, worker bool, cleanerImage string, dialerFactory hosts.DialerFactory) error {
+	if err := toDeleteHost.TunnelUp(dialerFactory); err != nil {
 		return fmt.Errorf("Not able to reach the host: %v", err)
 	}
 	if worker {
--- a/cluster/state.go
+++ b/cluster/state.go
@@ -6,13 +6,14 @@ import (
 	"time"
 
 	"github.com/rancher/rke/k8s"
+	"github.com/rancher/types/apis/management.cattle.io/v3"
 	"github.com/sirupsen/logrus"
 	yaml "gopkg.in/yaml.v2"
 	"k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 )
 
-func (c *Cluster) SaveClusterState(clusterFile string) error {
+func (c *Cluster) SaveClusterState(rkeConfig *v3.RancherKubernetesEngineConfig) error {
 	// Reinitialize kubernetes Client
 	var err error
 	c.KubeClient, err = k8s.NewClient(c.LocalKubeConfigPath)
@@ -23,7 +24,7 @@ func (c *Cluster) SaveClusterState(clusterFile string) error {
 	if err != nil {
 		return fmt.Errorf("[certificates] Failed to Save Kubernetes certificates: %v", err)
 	}
-	err = saveStateToKubernetes(c.KubeClient, c.LocalKubeConfigPath, []byte(clusterFile))
+	err = saveStateToKubernetes(c.KubeClient, c.LocalKubeConfigPath, rkeConfig)
 	if err != nil {
 		return fmt.Errorf("[state] Failed to save configuration state: %v", err)
 	}
@@ -56,7 +57,7 @@ func (c *Cluster) GetClusterState() (*Cluster, error) {
 	// Get previous kubernetes certificates
 	if currentCluster != nil {
 		currentCluster.Certificates, err = getClusterCerts(c.KubeClient)
-		currentCluster.Dialer = c.Dialer
+		currentCluster.DialerFactory = c.DialerFactory
 		if err != nil {
 			return nil, fmt.Errorf("Failed to Get Kubernetes certificates: %v", err)
 		}
@@ -75,8 +76,12 @@ func (c *Cluster) GetClusterState() (*Cluster, error) {
 	return currentCluster, nil
 }
 
-func saveStateToKubernetes(kubeClient *kubernetes.Clientset, kubeConfigPath string, clusterFile []byte) error {
+func saveStateToKubernetes(kubeClient *kubernetes.Clientset, kubeConfigPath string, rkeConfig *v3.RancherKubernetesEngineConfig) error {
 	logrus.Infof("[state] Saving cluster state to Kubernetes")
+	clusterFile, err := yaml.Marshal(*rkeConfig)
+	if err != nil {
+		return err
+	}
 	timeout := make(chan bool, 1)
 	go func() {
 		for {
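The state.go changes mirror ParseConfig: SaveClusterState now takes the typed v3.RancherKubernetesEngineConfig, and saveStateToKubernetes serializes it itself via yaml.Marshal before writing the state. A small round-trip sketch with gopkg.in/yaml.v2, using a hypothetical stand-in struct rather than the real config type:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Tiny stand-in for v3.RancherKubernetesEngineConfig (assumption for brevity).
type fakeConfig struct {
	SSHKeyPath string `yaml:"ssh_key_path"`
}

func main() {
	cfg := fakeConfig{SSHKeyPath: "~/.ssh/id_rsa"}

	// What saveStateToKubernetes now does before storing the state:
	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // ssh_key_path: ~/.ssh/id_rsa

	// And the inverse, what the new ParseConfig does on the way in:
	var back fakeConfig
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.SSHKeyPath)
}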