
Remove ingress controller when disabled

moelsayed 2018-07-13 23:48:15 +02:00 committed by Alena Prokharchyk
parent af77619859
commit 241f7857d6
4 changed files with 119 additions and 19 deletions
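The removal path added here is driven by the cluster configuration: when the built-in ingress controller is set to "none", any previously deployed controller is torn down. A minimal cluster.yml sketch of that setting (field names per RKE's cluster config; everything else omitted, value matches the "none" check in deployIngress below):

ingress:
  provider: "none"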

View File

@@ -1,12 +1,40 @@
package addons
import "github.com/rancher/rke/templates"
import (
"fmt"
"strconv"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/templates"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetAddonsExecuteJob(addonName, nodeName, image string) (string, error) {
return getAddonJob(addonName, nodeName, image, false)
}
func GetAddonsDeleteJob(addonName, nodeName, image string) (string, error) {
return getAddonJob(addonName, nodeName, image, true)
}
func getAddonJob(addonName, nodeName, image string, isDelete bool) (string, error) {
jobConfig := map[string]string{
"AddonName": addonName,
"NodeName": nodeName,
"Image": image,
"DeleteJob": strconv.FormatBool(isDelete),
}
return templates.CompileTemplateFromMap(templates.JobDeployerTemplate, jobConfig)
return templates.CompileTemplateFromMap(templates.AddonJobTemplate, jobConfig)
}
func AddonJobExists(addonJobName, kubeConfigPath string, k8sWrapTransport k8s.WrapTransport) (bool, error) {
k8sClient, err := k8s.NewClient(kubeConfigPath, k8sWrapTransport)
if err != nil {
return false, err
}
addonJobStatus, err := k8s.GetK8sJobStatus(k8sClient, addonJobName, metav1.NamespaceSystem)
if err != nil {
return false, fmt.Errorf("Failed to get job [%s] status: %v", addonJobName, err)
}
return addonJobStatus.Created, nil
}
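
A usage sketch for the new helpers (hypothetical caller, not part of this commit; the addon name, node name, and image are illustrative, and the real wiring is in doAddonDelete below):

package main

import (
	"fmt"

	"github.com/rancher/rke/addons"
)

func main() {
	// Render the delete variant of the addon job; the cluster code passes its
	// own control plane node name and kube-api image here.
	deleteJob, err := addons.GetAddonsDeleteJob("rke-ingress-controller", "node-1", "rancher/hyperkube:v1.11.1")
	if err != nil {
		panic(err)
	}
	// deleteJob is a batch/v1 Job manifest named "rke-ingress-controller-delete-job".
	fmt.Println(deleteJob)
}

AddonJobExists performs the job status lookup that deployIngress now uses to decide whether a previously deployed controller needs cleanup.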

View File

@@ -24,6 +24,9 @@ const (
UserAddonResourceName = "rke-user-addon"
IngressAddonResourceName = "rke-ingress-controller"
UserAddonsIncludeResourceName = "rke-user-includes-addons"
IngressAddonJobName = "rke-ingress-controller-deploy-job"
IngressAddonDeleteJobName = "rke-ingress-controller-delete-job"
)
type ingressOptions struct {
@@ -226,6 +229,39 @@ func (c *Cluster) doAddonDeploy(ctx context.Context, addonYaml, resourceName str
return nil
}
func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCritical bool) error {
k8sClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
if err != nil {
return &addonError{fmt.Sprintf("%v", err), isCritical}
}
node, err := k8s.GetNode(k8sClient, c.ControlPlaneHosts[0].HostnameOverride)
if err != nil {
return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
}
deleteJob, err := addons.GetAddonsDeleteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
if err != nil {
return &addonError{fmt.Sprintf("Failed to generate addon delete job: %v", err), isCritical}
}
if err := k8s.ApplyK8sSystemJob(deleteJob, c.LocalKubeConfigPath, c.K8sWrapTransport, c.AddonJobTimeout, false); err != nil {
return &addonError{fmt.Sprintf("%v", err), isCritical}
}
// At this point, the addon should be deleted. We need to clean up by deleting the deploy and delete jobs.
tmpJobYaml, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
if err != nil {
return err
}
if err := k8s.DeleteK8sSystemJob(tmpJobYaml, k8sClient, c.AddonJobTimeout); err != nil {
return err
}
if err := k8s.DeleteK8sSystemJob(deleteJob, k8sClient, c.AddonJobTimeout); err != nil {
return err
}
return nil
}
func (c *Cluster) StoreAddonConfigMap(ctx context.Context, addonYaml string, addonName string) (bool, error) {
log.Infof(ctx, "[addons] Saving addon ConfigMap to Kubernetes")
updated := false
@@ -258,7 +294,6 @@ func (c *Cluster) StoreAddonConfigMap(ctx context.Context, addonYaml string, add
func (c *Cluster) ApplySystemAddonExecuteJob(addonJob string, addonUpdated bool) error {
if err := k8s.ApplyK8sSystemJob(addonJob, c.LocalKubeConfigPath, c.K8sWrapTransport, c.AddonJobTimeout, addonUpdated); err != nil {
logrus.Error(err)
return err
}
return nil
@@ -266,7 +301,20 @@ func (c *Cluster) ApplySystemAddonExecuteJob(addonJob string, addonUpdated bool)
func (c *Cluster) deployIngress(ctx context.Context) error {
if c.Ingress.Provider == "none" {
log.Infof(ctx, "[ingress] ingress controller is not defined, skipping ingress controller")
addonJobExists, err := addons.AddonJobExists(IngressAddonJobName, c.LocalKubeConfigPath, c.K8sWrapTransport)
if err != nil {
return nil
}
if addonJobExists {
log.Infof(ctx, "[ingress] removing installed ingress controller")
if err := c.doAddonDelete(ctx, IngressAddonResourceName, false); err != nil {
return err
}
log.Infof(ctx, "[ingress] ingress controller removed successfully")
} else {
log.Infof(ctx, "[ingress] ingress controller is disabled, skipping ingress controller")
}
return nil
}
log.Infof(ctx, "[ingress] Setting up %s ingress controller", c.Ingress.Provider)

View File

@@ -29,7 +29,7 @@ func ApplyK8sSystemJob(jobYaml, kubeConfigPath string, k8sWrapTransport WrapTran
if err != nil {
return err
}
jobStatus, err := getK8sJobStatus(k8sClient, job.Name, job.Namespace)
jobStatus, err := GetK8sJobStatus(k8sClient, job.Name, job.Namespace)
if err != nil {
return err
}
@@ -37,18 +37,10 @@ func ApplyK8sSystemJob(jobYaml, kubeConfigPath string, k8sWrapTransport WrapTran
// I will remove the existing job first, if any
if addonUpdated || (jobStatus.Created && !jobStatus.Completed) {
logrus.Debugf("[k8s] replacing job %s.. ", job.Name)
if err := deleteK8sJob(k8sClient, job.Name, job.Namespace); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
} else { // ignoring NotFound errors
//Jobs take longer to delete than to complete, 2 x the timeout
if err := retryToWithTimeout(ensureJobDeleted, k8sClient, job, timeout*2); err != nil {
return err
}
if err := DeleteK8sSystemJob(jobYaml, k8sClient, timeout); err != nil {
return err
}
}
if _, err = k8sClient.BatchV1().Jobs(job.Namespace).Create(&job); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Debugf("[k8s] Job %s already exists..", job.Name)
@@ -60,10 +52,28 @@ func ApplyK8sSystemJob(jobYaml, kubeConfigPath string, k8sWrapTransport WrapTran
return retryToWithTimeout(ensureJobCompleted, k8sClient, job, timeout)
}
func DeleteK8sSystemJob(jobYaml string, k8sClient *kubernetes.Clientset, timeout int) error {
job := v1.Job{}
if err := decodeYamlResource(&job, jobYaml); err != nil {
return err
}
if err := deleteK8sJob(k8sClient, job.Name, job.Namespace); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
} else { // ignoring NotFound errors
//Jobs take longer to delete than to complete, 2 x the timeout
if err := retryToWithTimeout(ensureJobDeleted, k8sClient, job, timeout*2); err != nil {
return err
}
}
return nil
}
func ensureJobCompleted(k8sClient *kubernetes.Clientset, j interface{}) error {
job := j.(v1.Job)
jobStatus, err := getK8sJobStatus(k8sClient, job.Name, job.Namespace)
jobStatus, err := GetK8sJobStatus(k8sClient, job.Name, job.Namespace)
if err != nil {
return fmt.Errorf("Failed to get job complete status: %v", err)
}
@@ -100,7 +110,7 @@ func getK8sJob(k8sClient *kubernetes.Clientset, name, namespace string) (*v1.Job
return k8sClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
}
func getK8sJobStatus(k8sClient *kubernetes.Clientset, name, namespace string) (JobStatus, error) {
func GetK8sJobStatus(k8sClient *kubernetes.Clientset, name, namespace string) (JobStatus, error) {
existingJob, err := getK8sJob(k8sClient, name, namespace)
if err != nil {
if apierrors.IsNotFound(err) {

View File

@@ -1,18 +1,23 @@
package templates
const JobDeployerTemplate = `
const AddonJobTemplate = `
{{- $addonName := .AddonName }}
{{- $nodeName := .NodeName }}
{{- $image := .Image }}
apiVersion: batch/v1
kind: Job
metadata:
{{- if eq .DeleteJob "true" }}
name: {{$addonName}}-delete-job
{{- else }}
name: {{$addonName}}-deploy-job
{{- end }}
namespace: kube-system
spec:
backoffLimit: 10
template:
metadata:
name: pi
name: rke-deploy
spec:
tolerations:
- key: node-role.kubernetes.io/controlplane
@@ -25,9 +30,18 @@ spec:
serviceAccountName: rke-job-deployer
nodeName: {{$nodeName}}
containers:
{{- if eq .DeleteJob "true" }}
- name: {{$addonName}}-delete-pod
{{- else }}
- name: {{$addonName}}-pod
{{- end }}
image: {{$image}}
{{- if eq .DeleteJob "true" }}
command: ["/bin/sh"]
args: ["-c" ,"kubectl get --ignore-not-found=true -f /etc/config/{{$addonName}}.yaml -o name | xargs kubectl delete --ignore-not-found=true"]
{{- else }}
command: [ "kubectl", "apply", "-f" , "/etc/config/{{$addonName}}.yaml"]
{{- end }}
volumeMounts:
- name: config-volume
mountPath: /etc/config
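
For reference, a sketch of what the delete variant of this template renders to for the ingress addon, trimmed to the fields visible in this hunk; the node name and image are illustrative:

apiVersion: batch/v1
kind: Job
metadata:
  name: rke-ingress-controller-delete-job
  namespace: kube-system
spec:
  backoffLimit: 10
  template:
    metadata:
      name: rke-deploy
    spec:
      serviceAccountName: rke-job-deployer
      nodeName: node-1
      containers:
        - name: rke-ingress-controller-delete-pod
          image: rancher/hyperkube:v1.11.1
          command: ["/bin/sh"]
          args: ["-c", "kubectl get --ignore-not-found=true -f /etc/config/rke-ingress-controller.yaml -o name | xargs kubectl delete --ignore-not-found=true"]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config

The rendered job name matches the IngressAddonDeleteJobName constant added in the cluster package above, which is how the deploy and delete variants stay distinguishable in kube-system.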