
addons based on k8s jobs and configmaps

commit 9e2c352bc7
parent 0fe952fd10
moelsayed committed 2017-11-18 14:51:28 +02:00
9 changed files with 511 additions and 109 deletions
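In short: every addon (KubeDNS, the flannel network plugin, and any user manifests under `addons:` in cluster.yml) is now deployed by saving its YAML into a ConfigMap and then running a one-shot Job that mounts that ConfigMap and runs `kubectl apply` on a controlplane node, replacing the previous kubectl helper container. A rough sketch of the flow using the helpers added in this commit; the kubeconfig path, node name, and deploy image below are placeholder values, not taken from the diff:

package main

import (
	"log"

	"github.com/rancher/rke/addons"
	"github.com/rancher/rke/k8s"
)

func main() {
	// A user addon manifest, as it would appear under `addons:` in cluster.yml.
	addonYaml := `apiVersion: v1
kind: Pod
metadata:
  name: my-nginx
  namespace: default
spec:
  containers:
  - name: my-nginx
    image: nginx`

	// Placeholder values; RKE takes these from the parsed cluster state.
	kubeConfigPath := "kube_config_cluster.yml"         // hypothetical local kubeconfig path
	nodeName := "controlplane-1"                        // hypothetical controlplane hostname
	image := "quay.io/coreos/hyperkube:v1.7.5_coreos.0" // deploy image (ships kubectl)

	// 1. Save the addon manifest in a ConfigMap named after the addon resource.
	kubeClient, err := k8s.NewClient(kubeConfigPath)
	if err != nil {
		log.Fatal(err)
	}
	if err := k8s.UpdateConfigMap(kubeClient, []byte(addonYaml), "rke-user-addon"); err != nil {
		log.Fatal(err)
	}

	// 2. Render the one-shot deploy Job that mounts the ConfigMap and runs `kubectl apply` on it.
	jobYaml := addons.GetAddonsExcuteJob("rke-user-addon", nodeName, image)

	// 3. Create the Job in kube-system and wait for it to report completion.
	if err := k8s.ApplyK8sSystemJob(jobYaml, kubeConfigPath); err != nil {
		log.Fatal(err)
	}
}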

addons/addons.go (new file, +32 lines)

@@ -0,0 +1,32 @@
package addons

func GetAddonsExcuteJob(addonName, nodeName, image string) string {
	return `apiVersion: batch/v1
kind: Job
metadata:
  name: ` + addonName + `-deploy-job
spec:
  template:
    metadata:
      name: pi
    spec:
      hostNetwork: true
      nodeName: ` + nodeName + `
      containers:
        - name: ` + addonName + `-pod
          image: ` + image + `
          command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"]
          volumeMounts:
          - name: config-volume
            mountPath: /etc/config
      volumes:
        - name: config-volume
          configMap:
            # Provide the name of the ConfigMap containing the files you want
            # to add to the container
            name: ` + addonName + `
            items:
              - key: ` + addonName + `
                path: ` + addonName + `.yaml
      restartPolicy: Never`
}

addons/kubedns.go (new file, +191 lines)

@@ -0,0 +1,191 @@
package addons

func GetKubeDNSManifest(clusterDNSServer, clusterDomain string) string {
	return `
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=` + clusterDomain + `.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/` + clusterDomain + `/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + clusterDomain + `,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + clusterDomain + `,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default # Don't use cluster DNS.
      serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ` + clusterDNSServer + `
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
`
}


@@ -44,3 +44,17 @@ services:
     infra_container_image: gcr.io/google_containers/pause-amd64:3.0
   kubeproxy:
     image: quay.io/coreos/hyperkube:v1.7.5_coreos.0
+addons: |-
+    ---
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: my-nginx
+      namespace: default
+    spec:
+      containers:
+      - name: my-nginx
+        image: nginx
+        ports:
+        - containerPort: 80


@@ -2,13 +2,16 @@ package cluster
 import (
 	"fmt"
+	"time"
+	"github.com/rancher/rke/addons"
+	"github.com/rancher/rke/k8s"
 	"github.com/sirupsen/logrus"
 )
 const (
 	ClusterDNSServerIPEnvName = "RKE_DNS_SERVER"
 	ClusterDomainEnvName = "RKE_CLUSTER_DOMAIN"
+	KubeDNSAddonResourceName = "rke-kubedns-addon"
+	UserAddonResourceName = "rke-user-addon"
 )
 func (c *Cluster) DeployK8sAddOns() error {
@@ -16,18 +19,84 @@ func (c *Cluster) DeployK8sAddOns() error {
 	return err
 }
-func (c *Cluster) deployKubeDNS() error {
-	logrus.Infof("[plugins] Setting up KubeDNS")
+func (c *Cluster) DeployUserAddOns() error {
+	logrus.Infof("[addons] Setting up user addons..")
+	if c.Addons == "" {
+		logrus.Infof("[addons] No user addons configured..")
+		return nil
+	}
-	kubectlCmd := &KubectlCommand{
-		Cmd: []string{"apply -f /addons/kubedns*.yaml"},
+	if err := c.doAddonDeploy(c.Addons, UserAddonResourceName); err != nil {
+		return err
 	}
-	logrus.Infof("[plugins] Executing the deploy command..")
-	err := c.RunKubectlCmd(kubectlCmd)
-	if err != nil {
-		return fmt.Errorf("Failed to run kubectl command: %v", err)
-	}
-	logrus.Infof("[plugins] kubeDNS deployed successfully..")
+	logrus.Infof("[addons] User addon deployed successfully..")
 	return nil
 }
+func (c *Cluster) deployKubeDNS() error {
+	logrus.Infof("[addons] Setting up KubeDNS")
+	kubeDNSYaml := addons.GetKubeDNSManifest(c.ClusterDNSServer, c.ClusterDomain)
+	if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
+		return err
+	}
+	logrus.Infof("[addons] KubeDNS deployed successfully..")
+	return nil
+}
+func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {
+	err := c.StoreAddonConfigMap(addonYaml, resourceName)
+	if err != nil {
+		return fmt.Errorf("Failed to save addon ConfigMap: %v", err)
+	}
+	logrus.Infof("[addons] Executing deploy job..")
+	addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].AdvertisedHostname, c.Services.KubeAPI.Image)
+	err = c.ApplySystemAddonExcuteJob(addonJob)
+	if err != nil {
+		return fmt.Errorf("Failed to deploy addon execute job: %v", err)
+	}
+	return nil
+}
+func (c *Cluster) StoreAddonConfigMap(addonYaml string, addonName string) error {
+	logrus.Infof("[addons] Saving addon ConfigMap to Kubernetes")
+	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath)
+	if err != nil {
+		return err
+	}
+	timeout := make(chan bool, 1)
+	go func() {
+		for {
+			err := k8s.UpdateConfigMap(kubeClient, []byte(addonYaml), addonName)
+			if err != nil {
+				time.Sleep(time.Second * 5)
+				fmt.Println(err)
+				continue
+			}
+			logrus.Infof("[addons] Successfully Saved addon to Kubernetes ConfigMap: %s", addonName)
+			timeout <- true
+			break
+		}
+	}()
+	select {
+	case <-timeout:
+		return nil
+	case <-time.After(time.Second * UpdateStateTimeout):
+		return fmt.Errorf("[addons] Timeout waiting for kubernetes to be ready")
+	}
+}
+func (c *Cluster) ApplySystemAddonExcuteJob(addonJob string) error {
+	if err := k8s.ApplyK8sSystemJob(addonJob, c.LocalKubeConfigPath); err != nil {
+		fmt.Println(err)
+		return err
+	}
+	return nil
+}
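The ConfigMap save above races a retry loop against a timeout: a goroutine keeps calling k8s.UpdateConfigMap until it succeeds, while a select waits on either the buffered channel or time.After. A standard-library-only sketch of the same shape; trySave, the attempt counter, and the 30-second deadline are stand-ins, not values from this commit:

package main

import (
	"errors"
	"fmt"
	"time"
)

// trySave stands in for k8s.UpdateConfigMap: it fails until the API server responds.
func trySave(attempt int) error {
	if attempt < 3 {
		return errors.New("connection refused")
	}
	return nil
}

// saveWithTimeout retries trySave in a goroutine and gives up after a deadline,
// mirroring the structure of StoreAddonConfigMap above.
func saveWithTimeout(deadline time.Duration) error {
	done := make(chan bool, 1) // buffered so the goroutine can exit even after a timeout
	go func() {
		for attempt := 0; ; attempt++ {
			if err := trySave(attempt); err != nil {
				time.Sleep(time.Second) // the real code sleeps 5s between retries
				continue
			}
			done <- true
			return
		}
	}()
	select {
	case <-done:
		return nil
	case <-time.After(deadline):
		return fmt.Errorf("timeout waiting for kubernetes to be ready")
	}
}

func main() {
	fmt.Println(saveWithTimeout(30 * time.Second))
}

Because the channel is buffered with capacity one, the retry goroutine can still finish and exit even if the timeout branch has already won the select.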


@@ -1,87 +0,0 @@
package cluster

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/rancher/rke/docker"
	"github.com/rancher/rke/pki"
	"github.com/sirupsen/logrus"
)

const (
	KubectlImage    = "melsayed/kubectl:latest"
	KubctlContainer = "kubectl"
)

type KubectlCommand struct {
	Cmd []string
	Env []string
}

func (c *Cluster) buildClusterConfigEnv() []string {
	// This needs to be updated when add more configuration
	environmentMap := map[string]string{
		ClusterCIDREnvName:        c.ClusterCIDR,
		ClusterDNSServerIPEnvName: c.ClusterDNSServer,
		ClusterDomainEnvName:      c.ClusterDomain,
	}
	adminConfig := c.Certificates[pki.KubeAdminCommonName]
	//build ClusterConfigEnv
	env := []string{
		adminConfig.ConfigToEnv(),
	}
	for k, v := range environmentMap {
		env = append(env, fmt.Sprintf("%s=%s", k, v))
	}
	return env
}

func (c *Cluster) RunKubectlCmd(kubectlCmd *KubectlCommand) error {
	h := c.ControlPlaneHosts[0]
	logrus.Debugf("[kubectl] Using host [%s] for deployment", h.AdvertisedHostname)
	logrus.Debugf("[kubectl] Pulling kubectl image..")
	if err := docker.PullImage(h.DClient, h.AdvertisedHostname, KubectlImage); err != nil {
		return err
	}
	clusterConfigEnv := c.buildClusterConfigEnv()
	if kubectlCmd.Env != nil {
		clusterConfigEnv = append(clusterConfigEnv, kubectlCmd.Env...)
	}
	imageCfg := &container.Config{
		Image: KubectlImage,
		Env:   clusterConfigEnv,
		Cmd:   kubectlCmd.Cmd,
	}
	logrus.Debugf("[kubectl] Creating kubectl container..")
	resp, err := h.DClient.ContainerCreate(context.Background(), imageCfg, nil, nil, KubctlContainer)
	if err != nil {
		return fmt.Errorf("Failed to create kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
	}
	logrus.Debugf("[kubectl] Container %s created..", resp.ID)
	if err := h.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
		return fmt.Errorf("Failed to start kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
	}
	logrus.Debugf("[kubectl] running command: %s", kubectlCmd.Cmd)
	statusCh, errCh := h.DClient.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		if err != nil {
			return fmt.Errorf("Failed to execute kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
		}
	case status := <-statusCh:
		if status.StatusCode != 0 {
			return fmt.Errorf("kubectl command failed on host [%s]: exit status %v", h.AdvertisedHostname, status.StatusCode)
		}
	}
	if err := h.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
		return fmt.Errorf("Failed to remove kubectl container on host[%s]: %v", h.AdvertisedHostname, err)
	}
	return nil
}


@@ -1,25 +1,21 @@
 package cluster
 import (
-	"fmt"
+	"github.com/rancher/rke/network"
 	"github.com/sirupsen/logrus"
 )
 const (
 	ClusterCIDREnvName = "RKE_CLUSTER_CIDR"
+	NetworkPluginResourceName = "rke-netwok-plugin"
 )
 func (c *Cluster) DeployNetworkPlugin() error {
 	logrus.Infof("[network] Setting up network plugin: %s", c.Network.Plugin)
-	kubectlCmd := &KubectlCommand{
-		Cmd: []string{"apply -f /network/" + c.Network.Plugin + ".yaml"},
-	}
-	logrus.Infof("[network] Executing the deploy command..")
-	err := c.RunKubectlCmd(kubectlCmd)
-	if err != nil {
-		return fmt.Errorf("Failed to run kubectl command: %v", err)
+	pluginYaml := network.GetFlannelManifest(c.ClusterCIDR)
+	if err := c.doAddonDeploy(pluginYaml, NetworkPluginResourceName); err != nil {
+		return err
 	}
 	logrus.Infof("[network] Network plugin deployed successfully..")
 	return nil


@@ -105,6 +105,11 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
 		return APIURL, caCrt, clientCert, clientKey, err
 	}
+	err = kubeCluster.DeployUserAddOns()
+	if err != nil {
+		return APIURL, caCrt, clientCert, clientKey, err
+	}
 	APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
 	caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
 	clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))

k8s/job.go (new file, +55 lines)

@@ -0,0 +1,55 @@
package k8s

import (
	"bytes"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	"k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	yamlutil "k8s.io/apimachinery/pkg/util/yaml"
)

func ApplyK8sSystemJob(jobYaml, kubeConfigPath string) error {
	job := v1.Job{}
	decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(jobYaml)))
	err := decoder.Decode(&job)
	if err != nil {
		return err
	}
	if job.Namespace == metav1.NamespaceNone {
		job.Namespace = metav1.NamespaceSystem
	}
	k8sClient, err := NewClient(kubeConfigPath)
	if err != nil {
		return err
	}
	if _, err = k8sClient.BatchV1().Jobs(job.Namespace).Create(&job); err != nil {
		if apierrors.IsAlreadyExists(err) {
			logrus.Debugf("[k8s] Job %s already exists..", job.Name)
			return nil
		}
		return err
	}
	existingJob := &v1.Job{}
	for retries := 0; retries <= 5; retries++ {
		time.Sleep(time.Second * 1)
		existingJob, err = k8sClient.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("Failed to update job status: %v", err)
		}
		for _, condition := range existingJob.Status.Conditions {
			if condition.Type == v1.JobComplete && condition.Status == corev1.ConditionTrue {
				logrus.Debugf("[k8s] Job %s completed successfully..", job.Name)
				return nil
			}
		}
	}
	return fmt.Errorf("Failed to get job complete status: %v", err)
}

network/flannel.go (new file, +127 lines)

@@ -0,0 +1,127 @@
package network

func GetFlannelManifest(clusterCIDR string) string {
	return `
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: "kube-system"
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name":"cbr0",
      "cniVersion":"0.3.1",
      "plugins":[
        {
          "type":"flannel",
          "delegate":{
            "forceAddress":true,
            "isDefaultGateway":true
          }
        },
        {
          "type":"portmap",
          "capabilities":{
            "portMappings":true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "` + clusterCIDR + `",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel
  namespace: "kube-system"
  labels:
    tier: node
    k8s-app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        k8s-app: flannel
    spec:
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.8.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 300m
            memory: 500M
          requests:
            cpu: 150m
            memory: 64M
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: quay.io/coreos/flannel-cni:v0.2.0
        command: ["/install-cni.sh"]
        env:
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: kube-flannel-cfg
              key: cni-conf.json
        - name: CNI_CONF_NAME
          value: "10-flannel.conflist"
        volumeMounts:
        - name: cni
          mountPath: /host/etc/cni/net.d
        - name: host-cni-bin
          mountPath: /host/opt/cni/bin/
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: host-cni-bin
        hostPath:
          path: /opt/cni/bin
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate`
}