Mirror of https://github.com/rancher/rke.git
Merge pull request #22 from moelsayed/job_based_addons
System and user addons deployed via Kubernetes Jobs and ConfigMaps
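The mechanism is two-step: the addon manifest is written into a ConfigMap in kube-system, and a one-shot Job on a controlplane node then mounts that ConfigMap and runs kubectl apply against it. A minimal sketch of how the user-addon path might be exercised once a cluster is up; the example package, the deployExampleAddon wrapper, and the direct field assignment are illustrative assumptions, not part of this PR (in practice the manifest comes from the new addons field in cluster.yml):

package example

import "github.com/rancher/rke/cluster"

// deployExampleAddon is a sketch, not part of this PR: it assumes kubeCluster
// was already parsed from cluster.yml and provisioned, and that the Addons
// field (normally filled from the new "addons" key in cluster.yml) is
// reachable on the Cluster type. The nginx manifest is the same example used
// in cluster.yml below.
func deployExampleAddon(kubeCluster *cluster.Cluster) error {
    kubeCluster.Addons = `---
apiVersion: v1
kind: Pod
metadata:
  name: my-nginx
  namespace: default
spec:
  containers:
  - name: my-nginx
    image: nginx`

    // Internally the manifest is saved into the "rke-user-addon" ConfigMap and
    // a one-shot Job runs "kubectl apply -f /etc/config/rke-user-addon.yaml"
    // on the first controlplane host.
    return kubeCluster.DeployUserAddOns()
}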
Commit e53f7adf02

addons/addons.go (new file, 32 lines)
package addons

func GetAddonsExcuteJob(addonName, nodeName, image string) string {
    return `apiVersion: batch/v1
kind: Job
metadata:
  name: ` + addonName + `-deploy-job
spec:
  template:
    metadata:
      name: pi
    spec:
      hostNetwork: true
      nodeName: ` + nodeName + `
      containers:
        - name: ` + addonName + `-pod
          image: ` + image + `
          command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"]
          volumeMounts:
          - name: config-volume
            mountPath: /etc/config
      volumes:
        - name: config-volume
          configMap:
            # Provide the name of the ConfigMap containing the files you want
            # to add to the container
            name: ` + addonName + `
            items:
              - key: ` + addonName + `
                path: ` + addonName + `.yaml
      restartPolicy: Never`
}
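For illustration, a hedged sketch of calling this helper directly; "controlplane-0" is a placeholder node name, while the resource name and hyperkube image match values used elsewhere in this PR:

package main

import (
    "fmt"

    "github.com/rancher/rke/addons"
)

func main() {
    // Sketch: render the deploy Job for the kube-dns addon ConfigMap.
    // "controlplane-0" is a placeholder node name; the resource name is the
    // cluster package's KubeDNSAddonResourceName and the image is the
    // hyperkube image from cluster.yml, which ships a kubectl binary.
    jobYaml := addons.GetAddonsExcuteJob(
        "rke-kubedns-addon",
        "controlplane-0",
        "quay.io/coreos/hyperkube:v1.7.5_coreos.0",
    )
    fmt.Println(jobYaml)
}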
addons/kubedns.go (new file, 191 lines)

package addons

func GetKubeDNSManifest(clusterDNSServer, clusterDomain string) string {
    return `
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=` + clusterDomain + `.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/` + clusterDomain + `/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + clusterDomain + `,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + clusterDomain + `,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ` + clusterDNSServer + `
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
`
}
cluster.yml (14 additions)

@@ -44,3 +44,17 @@ services:
     infra_container_image: gcr.io/google_containers/pause-amd64:3.0
   kubeproxy:
     image: quay.io/coreos/hyperkube:v1.7.5_coreos.0
+
+addons: |-
+    ---
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: my-nginx
+      namespace: default
+    spec:
+      containers:
+      - name: my-nginx
+        image: nginx
+        ports:
+        - containerPort: 80
@@ -2,13 +2,16 @@ package cluster
 
 import (
     "fmt"
+    "time"
 
+    "github.com/rancher/rke/addons"
+    "github.com/rancher/rke/k8s"
     "github.com/sirupsen/logrus"
 )
 
 const (
-    ClusterDNSServerIPEnvName = "RKE_DNS_SERVER"
-    ClusterDomainEnvName      = "RKE_CLUSTER_DOMAIN"
+    KubeDNSAddonResourceName = "rke-kubedns-addon"
+    UserAddonResourceName    = "rke-user-addon"
 )
 
 func (c *Cluster) DeployK8sAddOns() error {
@@ -16,18 +19,84 @@ func (c *Cluster) DeployK8sAddOns() error {
     return err
 }
 
-func (c *Cluster) deployKubeDNS() error {
-    logrus.Infof("[plugins] Setting up KubeDNS")
-    kubectlCmd := &KubectlCommand{
-        Cmd: []string{"apply -f /addons/kubedns*.yaml"},
-    }
-    logrus.Infof("[plugins] Executing the deploy command..")
-    err := c.RunKubectlCmd(kubectlCmd)
-    if err != nil {
-        return fmt.Errorf("Failed to run kubectl command: %v", err)
-    }
-    logrus.Infof("[plugins] kubeDNS deployed successfully..")
+func (c *Cluster) DeployUserAddOns() error {
+    logrus.Infof("[addons] Setting up user addons..")
+    if c.Addons == "" {
+        logrus.Infof("[addons] No user addons configured..")
+        return nil
+    }
+
+    if err := c.doAddonDeploy(c.Addons, UserAddonResourceName); err != nil {
+        return err
+    }
+    logrus.Infof("[addons] User addon deployed successfully..")
     return nil
 
 }
 
+func (c *Cluster) deployKubeDNS() error {
+    logrus.Infof("[addons] Setting up KubeDNS")
+
+    kubeDNSYaml := addons.GetKubeDNSManifest(c.ClusterDNSServer, c.ClusterDomain)
+
+    if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
+        return err
+    }
+    logrus.Infof("[addons] KubeDNS deployed successfully..")
+    return nil
+
+}
+
+func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {
+
+    err := c.StoreAddonConfigMap(addonYaml, resourceName)
+    if err != nil {
+        return fmt.Errorf("Failed to save addon ConfigMap: %v", err)
+    }
+
+    logrus.Infof("[addons] Executing deploy job..")
+
+    addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].AdvertisedHostname, c.Services.KubeAPI.Image)
+    err = c.ApplySystemAddonExcuteJob(addonJob)
+    if err != nil {
+        return fmt.Errorf("Failed to deploy addon execute job: %v", err)
+    }
+    return nil
+}
+
+func (c *Cluster) StoreAddonConfigMap(addonYaml string, addonName string) error {
+    logrus.Infof("[addons] Saving addon ConfigMap to Kubernetes")
+    kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath)
+    if err != nil {
+        return err
+    }
+    timeout := make(chan bool, 1)
+    go func() {
+        for {
+            err := k8s.UpdateConfigMap(kubeClient, []byte(addonYaml), addonName)
+            if err != nil {
+                time.Sleep(time.Second * 5)
+                fmt.Println(err)
+                continue
+            }
+            logrus.Infof("[addons] Successfully Saved addon to Kubernetes ConfigMap: %s", addonName)
+            timeout <- true
+            break
+        }
+    }()
+    select {
+    case <-timeout:
+        return nil
+    case <-time.After(time.Second * UpdateStateTimeout):
+        return fmt.Errorf("[addons] Timeout waiting for kubernetes to be ready")
+    }
+}
+
+func (c *Cluster) ApplySystemAddonExcuteJob(addonJob string) error {
+
+    if err := k8s.ApplyK8sSystemJob(addonJob, c.LocalKubeConfigPath); err != nil {
+        fmt.Println(err)
+        return err
+    }
+    return nil
+}
@@ -1,87 +0,0 @@
-package cluster
-
-import (
-    "context"
-    "fmt"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/api/types/container"
-    "github.com/rancher/rke/docker"
-    "github.com/rancher/rke/pki"
-    "github.com/sirupsen/logrus"
-)
-
-const (
-    KubectlImage    = "melsayed/kubectl:latest"
-    KubctlContainer = "kubectl"
-)
-
-type KubectlCommand struct {
-    Cmd []string
-    Env []string
-}
-
-func (c *Cluster) buildClusterConfigEnv() []string {
-    // This needs to be updated when add more configuration
-    environmentMap := map[string]string{
-        ClusterCIDREnvName:        c.ClusterCIDR,
-        ClusterDNSServerIPEnvName: c.ClusterDNSServer,
-        ClusterDomainEnvName:      c.ClusterDomain,
-    }
-    adminConfig := c.Certificates[pki.KubeAdminCommonName]
-    //build ClusterConfigEnv
-    env := []string{
-        adminConfig.ConfigToEnv(),
-    }
-    for k, v := range environmentMap {
-        env = append(env, fmt.Sprintf("%s=%s", k, v))
-    }
-    return env
-}
-
-func (c *Cluster) RunKubectlCmd(kubectlCmd *KubectlCommand) error {
-    h := c.ControlPlaneHosts[0]
-
-    logrus.Debugf("[kubectl] Using host [%s] for deployment", h.AdvertisedHostname)
-    logrus.Debugf("[kubectl] Pulling kubectl image..")
-
-    if err := docker.PullImage(h.DClient, h.AdvertisedHostname, KubectlImage); err != nil {
-        return err
-    }
-
-    clusterConfigEnv := c.buildClusterConfigEnv()
-    if kubectlCmd.Env != nil {
-        clusterConfigEnv = append(clusterConfigEnv, kubectlCmd.Env...)
-    }
-
-    imageCfg := &container.Config{
-        Image: KubectlImage,
-        Env:   clusterConfigEnv,
-        Cmd:   kubectlCmd.Cmd,
-    }
-    logrus.Debugf("[kubectl] Creating kubectl container..")
-    resp, err := h.DClient.ContainerCreate(context.Background(), imageCfg, nil, nil, KubctlContainer)
-    if err != nil {
-        return fmt.Errorf("Failed to create kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
-    }
-    logrus.Debugf("[kubectl] Container %s created..", resp.ID)
-    if err := h.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-        return fmt.Errorf("Failed to start kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
-    }
-    logrus.Debugf("[kubectl] running command: %s", kubectlCmd.Cmd)
-    statusCh, errCh := h.DClient.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
-    select {
-    case err := <-errCh:
-        if err != nil {
-            return fmt.Errorf("Failed to execute kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
-        }
-    case status := <-statusCh:
-        if status.StatusCode != 0 {
-            return fmt.Errorf("kubectl command failed on host [%s]: exit status %v", h.AdvertisedHostname, status.StatusCode)
-        }
-    }
-    if err := h.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
-        return fmt.Errorf("Failed to remove kubectl container on host[%s]: %v", h.AdvertisedHostname, err)
-    }
-    return nil
-}
@@ -1,25 +1,21 @@
 package cluster
 
 import (
-    "fmt"
+    "github.com/rancher/rke/network"
 
     "github.com/sirupsen/logrus"
 )
 
 const (
-    ClusterCIDREnvName = "RKE_CLUSTER_CIDR"
+    NetworkPluginResourceName = "rke-netwok-plugin"
 )
 
 func (c *Cluster) DeployNetworkPlugin() error {
     logrus.Infof("[network] Setting up network plugin: %s", c.Network.Plugin)
 
-    kubectlCmd := &KubectlCommand{
-        Cmd: []string{"apply -f /network/" + c.Network.Plugin + ".yaml"},
-    }
-    logrus.Infof("[network] Executing the deploy command..")
-    err := c.RunKubectlCmd(kubectlCmd)
-    if err != nil {
-        return fmt.Errorf("Failed to run kubectl command: %v", err)
+    pluginYaml := network.GetFlannelManifest(c.ClusterCIDR)
+    if err := c.doAddonDeploy(pluginYaml, NetworkPluginResourceName); err != nil {
+        return err
     }
     logrus.Infof("[network] Network plugin deployed successfully..")
     return nil
@@ -111,6 +111,11 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
         return APIURL, caCrt, clientCert, clientKey, err
     }
 
+    err = kubeCluster.DeployUserAddOns()
+    if err != nil {
+        return APIURL, caCrt, clientCert, clientKey, err
+    }
+
     APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
     caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
     clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
k8s/job.go (new file, 55 lines)

package k8s

import (
    "bytes"
    "fmt"
    "time"

    "github.com/sirupsen/logrus"
    "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    yamlutil "k8s.io/apimachinery/pkg/util/yaml"
)

func ApplyK8sSystemJob(jobYaml, kubeConfigPath string) error {
    job := v1.Job{}
    decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(jobYaml)))
    err := decoder.Decode(&job)
    if err != nil {
        return err
    }
    if job.Namespace == metav1.NamespaceNone {
        job.Namespace = metav1.NamespaceSystem
    }
    k8sClient, err := NewClient(kubeConfigPath)
    if err != nil {
        return err
    }
    if _, err = k8sClient.BatchV1().Jobs(job.Namespace).Create(&job); err != nil {
        if apierrors.IsAlreadyExists(err) {
            logrus.Debugf("[k8s] Job %s already exists..", job.Name)
            return nil
        }
        return err
    }
    existingJob := &v1.Job{}
    for retries := 0; retries <= 5; retries++ {
        time.Sleep(time.Second * 1)
        existingJob, err = k8sClient.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
        if err != nil {
            return fmt.Errorf("Failed to update job status: %v", err)
        }
        for _, condition := range existingJob.Status.Conditions {
            if condition.Type == v1.JobComplete && condition.Status == corev1.ConditionTrue {
                logrus.Debugf("[k8s] Job %s completed successfully..", job.Name)
                return nil
            }
        }
    }
    return fmt.Errorf("Failed to get job complete status: %v", err)
}
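Taken together with addons.GetAddonsExcuteJob, this is roughly how an addon execute Job is generated and submitted (the cluster code does this via ApplySystemAddonExcuteJob); the kubeconfig path and node name below are placeholders, not values from the PR:

package main

import (
    "log"

    "github.com/rancher/rke/addons"
    "github.com/rancher/rke/k8s"
)

func main() {
    // Sketch: build the kubectl-apply Job for an addon ConfigMap and submit it.
    // The node name and kubeconfig path are placeholders for values RKE keeps
    // on the Cluster object (ControlPlaneHosts[0].AdvertisedHostname and
    // LocalKubeConfigPath).
    jobYaml := addons.GetAddonsExcuteJob(
        "rke-user-addon",
        "controlplane-0",
        "quay.io/coreos/hyperkube:v1.7.5_coreos.0",
    )
    if err := k8s.ApplyK8sSystemJob(jobYaml, "path/to/kube_config"); err != nil {
        log.Fatalf("addon deploy job failed: %v", err)
    }
}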
network/flannel.go (new file, 127 lines)

package network

func GetFlannelManifest(clusterCIDR string) string {
    return `
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: "kube-system"
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name":"cbr0",
      "cniVersion":"0.3.1",
      "plugins":[
        {
          "type":"flannel",
          "delegate":{
            "forceAddress":true,
            "isDefaultGateway":true
          }
        },
        {
          "type":"portmap",
          "capabilities":{
            "portMappings":true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "` + clusterCIDR + `",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel
  namespace: "kube-system"
  labels:
    tier: node
    k8s-app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        k8s-app: flannel
    spec:
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.8.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 300m
            memory: 500M
          requests:
            cpu: 150m
            memory: 64M
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: quay.io/coreos/flannel-cni:v0.2.0
        command: ["/install-cni.sh"]
        env:
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: kube-flannel-cfg
              key: cni-conf.json
        - name: CNI_CONF_NAME
          value: "10-flannel.conflist"
        volumeMounts:
        - name: cni
          mountPath: /host/etc/cni/net.d
        - name: host-cni-bin
          mountPath: /host/opt/cni/bin/
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
        - name: host-cni-bin
          hostPath:
            path: /opt/cni/bin
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate`
}
@@ -10,7 +10,7 @@ github.com/docker/distribution 3800056b8832cf6075e78b282ac010131d8687b
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 golang.org/x/net 186fd3fc8194a5e9980a82230d69c1ff7134229f
-github.com/rancher/types a71860ee9f4809a57d2cc7dadf2d74b9b8f2d736
+github.com/rancher/types efe8c45673d9e2132cd78cb46e61340b5ffc4421
 github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
 github.com/gogo/protobuf 117892bf1866fbaa2318c03e50e40564c8845457
 github.com/opencontainers/image-spec 7c889fafd04a893f5c5f50b7ab9963d5d64e5242
vendor/github.com/rancher/types/apis/cluster.cattle.io/v1/types.go (generated, vendored)

@@ -31,7 +31,7 @@ type Cluster struct {
     Spec ClusterSpec `json:"spec"`
     // Most recent observed status of the cluster. More info:
     // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
-    Status *ClusterStatus `json:"status"`
+    Status ClusterStatus `json:"status"`
 }
 
 type ClusterSpec struct {
@@ -46,13 +46,13 @@ type ClusterStatus struct {
     Conditions []ClusterCondition `json:"conditions,omitempty"`
     //Component statuses will represent cluster's components (etcd/controller/scheduler) health
     // https://kubernetes.io/docs/api-reference/v1.8/#componentstatus-v1-core
-    ComponentStatuses []ClusterComponentStatus
+    ComponentStatuses []ClusterComponentStatus `json:"componentStatuses,omitempty"`
     APIEndpoint string `json:"apiEndpoint,omitempty"`
     ServiceAccountToken string `json:"serviceAccountToken,omitempty"`
     CACert string `json:"caCert,omitempty"`
     Capacity v1.ResourceList `json:"capacity,omitempty"`
     Allocatable v1.ResourceList `json:"allocatable,omitempty"`
-    AppliedSpec ClusterSpec `json:"clusterSpec,omitempty"`
+    AppliedSpec ClusterSpec `json:"appliedSpec,omitempty"`
 }
 
 type ClusterComponentStatus struct {
@@ -114,6 +114,8 @@ type RancherKubernetesEngineConfig struct {
     Network NetworkConfig `yaml:"network" json:"network,omitempty"`
     // Authentication configuration used in the cluster (default: x509)
     Authentication AuthConfig `yaml:"auth" json:"auth,omitempty"`
+    // YAML manifest for user provided addons to be deployed on the cluster
+    Addons string `yaml:"addons" json:"addons,omitempty"`
 }
 
 type RKEConfigHost struct {
@@ -135,9 +137,9 @@ type RKEConfigServices struct {
     // Etcd Service
     Etcd ETCDService `yaml:"etcd" json:"etcd,omitempty"`
     // KubeAPI Service
-    KubeAPI KubeAPIService `yaml:"kube-api" json:"kube-api,omitempty"`
+    KubeAPI KubeAPIService `yaml:"kube-api" json:"kubeApi,omitempty"`
     // KubeController Service
-    KubeController KubeControllerService `yaml:"kube-controller" json:"kube-controller,omitempty"`
+    KubeController KubeControllerService `yaml:"kube-controller" json:"kubeController,omitempty"`
     // Scheduler Service
     Scheduler SchedulerService `yaml:"scheduler" json:"scheduler,omitempty"`
     // Kubelet Service
vendor/github.com/rancher/types/apis/cluster.cattle.io/v1/zz_generated_deepcopy.go
generated
vendored
10
vendor/github.com/rancher/types/apis/cluster.cattle.io/v1/zz_generated_deepcopy.go
generated
vendored
@ -73,15 +73,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
|
|||||||
out.TypeMeta = in.TypeMeta
|
out.TypeMeta = in.TypeMeta
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
in.Spec.DeepCopyInto(&out.Spec)
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
if in.Status != nil {
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
in, out := &in.Status, &out.Status
|
|
||||||
if *in == nil {
|
|
||||||
*out = nil
|
|
||||||
} else {
|
|
||||||
*out = new(ClusterStatus)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vendor/github.com/rancher/types/vendor.conf (generated, vendored)

@@ -3,4 +3,4 @@ github.com/rancher/types
 
 k8s.io/kubernetes v1.8.3 transitive=true,staging=true
 bitbucket.org/ww/goautoneg a547fc61f48d567d5b4ec6f8aee5573d8efce11d https://github.com/rancher/goautoneg.git
-github.com/rancher/norman cad01ba487d6c071911c619babc45ae80c252229
+github.com/rancher/norman 80024df69414f7cce0847ea72b0557f14edbc852