
Merge pull request #2 from moelsayed/add_network_plugin_support

add network plugin support
Authored by Alena Prokharchyk on 2017-11-07 16:03:19 -08:00, committed by GitHub.
13 changed files with 504 additions and 1 deletion

cluster.yml

@@ -1,4 +1,5 @@
---
network_plugin: flannel
hosts:
- hostname: server1
ip: 1.1.1.1
@@ -27,6 +28,7 @@ services:
kubelet:
image: quay.io/coreos/hyperkube:v1.7.5_coreos.0
cluster_domain: cluster.local
cluster_dns_server: 10.233.0.3
infra_container_image: gcr.io/google_containers/pause-amd64:3.0
kubeproxy:
image: quay.io/coreos/hyperkube:v1.7.5_coreos.0

cluster/addons.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package cluster
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/pki"
)
const (
ClusterDnsServerIPEnvName = "RKE_DNS_SERVER"
ClusterDomainEnvName = "RKE_CLUSTER_DOMAIN"
)
func (c *Cluster) DeployK8sAddOns() error {
if err := c.deployKubeDNS(); err != nil {
return err
}
return nil
}
func (c *Cluster) deployKubeDNS() error {
logrus.Infof("[plugins] Setting up KubeDNS")
deployerHost := c.ControlPlaneHosts[0]
kubectlCmd := []string{"apply -f /addons/kubedns*.yaml"}
env := []string{
fmt.Sprintf("%s=%s", pki.KubeAdminConfigENVName, c.Certificates[pki.KubeAdminCommonName].Config),
fmt.Sprintf("%s=%s", ClusterDnsServerIPEnvName, c.ClusterDnsServer),
fmt.Sprintf("%s=%s", ClusterDomainEnvName, c.ClusterDomain),
}
logrus.Infof("[plugins] Executing the deploy command..")
err := k8s.RunKubectlCmd(deployerHost.DClient, deployerHost.Hostname, kubectlCmd, env)
if err != nil {
return fmt.Errorf("Failed to run kubectl command: %v", err)
}
logrus.Infof("[plugins] kubeDNS deployed successfully..")
return nil
}
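The deploy helpers never run kubectl on the machine where rke itself runs; they hand a command and an environment to RunKubectlCmd (added in the k8s package below), which executes kubectl inside a container on the first controlplane host. KUBECFG_ADMIN carries the full admin kubeconfig, which the image's entrypoint writes to /root/.kube/config, while the RKE_* variables are substituted into the bundled manifests. A minimal illustration, with sample values taken from the example cluster.yml above and the kubeconfig value elided:

package main

import "fmt"

func main() {
	// The same three entries deployKubeDNS builds, with concrete values.
	env := []string{
		fmt.Sprintf("%s=%s", "KUBECFG_ADMIN", "<admin kubeconfig yaml>"),
		fmt.Sprintf("%s=%s", "RKE_DNS_SERVER", "10.233.0.3"),
		fmt.Sprintf("%s=%s", "RKE_CLUSTER_DOMAIN", "cluster.local"),
	}
	fmt.Println(env)
}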

cluster/cluster.go

@@ -22,6 +22,9 @@ type Cluster struct {
KubernetesServiceIP net.IP
Certificates map[string]pki.CertificatePKI
ClusterDomain string
NetworkPlugin string `yaml:"network_plugin"`
ClusterCIDR string
ClusterDnsServer string
}
const (
@@ -65,6 +68,8 @@ func ParseConfig(clusterFile string) (*Cluster, error) {
return nil, fmt.Errorf("Failed to get Kubernetes Service IP: %v", err)
}
c.ClusterDomain = c.Services.Kubelet.ClusterDomain
c.ClusterCIDR = c.Services.KubeController.ClusterCIDR
c.ClusterDnsServer = c.Services.Kubelet.ClusterDnsServer
return c, nil
}
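Note that only NetworkPlugin gets a yaml tag; ClusterCIDR and ClusterDnsServer are not read from the top level but copied out of the kube-controller and kubelet service configs during ParseConfig. A minimal sketch, assuming gopkg.in/yaml.v2 for config parsing (the unmarshal call itself is outside this hunk), of how the tag maps the cluster.yml key onto the struct:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// miniCluster is a hypothetical cut-down Cluster with just the new field.
type miniCluster struct {
	NetworkPlugin string `yaml:"network_plugin"`
}

func main() {
	var c miniCluster
	if err := yaml.Unmarshal([]byte("network_plugin: flannel\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.NetworkPlugin) // prints: flannel
}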

cluster/network.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package cluster
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/pki"
)
const (
ClusterCIDREnvName = "RKE_CLUSTER_CIDR"
)
func (c *Cluster) DeployNetworkPlugin() error {
logrus.Infof("[network] Setting up network plugin: %s", c.NetworkPlugin)
deployerHost := c.ControlPlaneHosts[0]
kubectlCmd := []string{"apply -f /network/" + c.NetworkPlugin + ".yaml"}
env := []string{
fmt.Sprintf("%s=%s", pki.KubeAdminConfigENVName, c.Certificates[pki.KubeAdminCommonName].Config),
fmt.Sprintf("%s=%s", ClusterCIDREnvName, c.ClusterCIDR),
}
logrus.Infof("[network] Executing the deploy command..")
err := k8s.RunKubectlCmd(deployerHost.DClient, deployerHost.Hostname, kubectlCmd, env)
if err != nil {
return fmt.Errorf("Failed to run kubectl command: %v", err)
}
logrus.Infof("[network] Network plugin deployed successfully..")
return nil
}
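The manifest path is derived directly from the plugin name, and flannel.yaml is the only manifest shipped in the kubectl image by this commit, so a misspelled network_plugin only fails later, inside the container, as a missing file. A hypothetical pre-flight check (not part of this commit) could fail fast instead:

package cluster

import "fmt"

// validateNetworkPlugin is a hypothetical guard; as of this commit only
// flannel has a manifest under /network/ in the kubectl image.
func validateNetworkPlugin(plugin string) error {
	supported := map[string]bool{"flannel": true}
	if !supported[plugin] {
		return fmt.Errorf("unsupported network plugin [%s]", plugin)
	}
	return nil
}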


@@ -77,6 +77,16 @@ func ClusterUp(clusterFile, authType string) (string, string, string, string, error) {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployNetworkPlugin()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.DeployK8sAddOns()
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SaveClusterState(clusterFile)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err


@@ -1,6 +1,13 @@
package k8s
import (
"context"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/rancher/rke/docker"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -8,6 +15,11 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
const (
KubectlImage = "melsayed/kubectl:latest"
KubctlContainer = "kubectl"
)
func NewClient(kubeConfigPath string) (*kubernetes.Clientset, error) {
// use the current admin kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
@@ -88,3 +100,47 @@ func GetSecret(k8sClient *kubernetes.Clientset, secretName string) (*v1.Secret, error) {
func DeleteNode(k8sClient *kubernetes.Clientset, nodeName string) error {
return k8sClient.Nodes().Delete(nodeName, &metav1.DeleteOptions{})
}
func RunKubectlCmd(dClient *client.Client, hostname string, cmd []string, withEnv []string) error {
logrus.Debugf("[kubectl] Using host [%s] for deployment", hostname)
logrus.Debugf("[kubectl] Pulling kubectl image..")
if err := docker.PullImage(dClient, hostname, KubectlImage); err != nil {
return err
}
env := []string{}
if withEnv != nil {
env = append(env, withEnv...)
}
imageCfg := &container.Config{
Image: KubectlImage,
Env: env,
Cmd: cmd,
}
logrus.Debugf("[kubectl] Creating kubectl container..")
resp, err := dClient.ContainerCreate(context.Background(), imageCfg, nil, nil, KubctlContainer)
if err != nil {
return fmt.Errorf("Failed to create kubectl container on host [%s]: %v", hostname, err)
}
logrus.Debugf("[kubectl] Container %s created..", resp.ID)
if err := dClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("Failed to start kubectl container on host [%s]: %v", hostname, err)
}
logrus.Debugf("[kubectl] running command: %s", cmd)
statusCh, errCh := dClient.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
if err != nil {
return fmt.Errorf("Failed to execute kubectl container on host [%s]: %v", hostname, err)
}
case status := <-statusCh:
if status.StatusCode != 0 {
return fmt.Errorf("kubectl command failed on host [%s]: exit status %v", hostname, status.StatusCode)
}
}
if err := dClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
return fmt.Errorf("Failed to remove kubectl container on host[%s]: %v", hostname, err)
}
return nil
}
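RunKubectlCmd is a pull/create/start/wait/remove cycle against a Docker daemon rather than a local exec, so a caller needs nothing but a Docker client wired to the target host. A minimal sketch, assuming a reachable daemon and the era-appropriate client.NewEnvClient constructor from the Docker SDK:

package main

import (
	"github.com/docker/docker/client"
	"github.com/rancher/rke/k8s"
)

func main() {
	// Assumes DOCKER_HOST (and TLS settings, if any) point at the target host.
	dClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// The image's entrypoint runs `kubectl ${@}`, so this executes
	// "kubectl version --client" in a throwaway container.
	if err := k8s.RunKubectlCmd(dClient, "server1", []string{"version --client"}, nil); err != nil {
		panic(err)
	}
}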

package/kubectl/Dockerfile

@@ -0,0 +1,10 @@
FROM alpine:3.4
RUN apk update && apk add bash curl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.7.5/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
RUN mkdir -p /network /addons /root/.kube/
COPY network/*.yaml /network/
COPY addons/*.yaml /addons/
COPY entrypoint.sh /tmp/entrypoint.sh
ENTRYPOINT ["/tmp/entrypoint.sh"]


@@ -0,0 +1,202 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.
# Warning: This is a file generated from the base underscore template file: kubedns-controller.yaml.base
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. The default is 1.
# 3. It will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only set up the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=RKE_CLUSTER_DOMAIN.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/RKE_CLUSTER_DOMAIN/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.RKE_CLUSTER_DOMAIN,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.RKE_CLUSTER_DOMAIN,5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: RKE_DNS_SERVER
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP

package/kubectl/entrypoint.sh (new executable file, 22 lines)

@@ -0,0 +1,22 @@
#!/bin/bash -x
# Set template configuration
for i in $(env | grep -o RKE_.*=); do
key=$(echo "$i" | cut -f1 -d"=")
value=$(echo "${!key}")
for f in /network/*.yaml /addons/*.yaml; do
sed -i "s|${key}|${value}|g" ${f}
done
done
for i in $(env | grep -o KUBECFG_.*=); do
name="$(echo "$i" | cut -f1 -d"=" | tr '[:upper:]' '[:lower:]' | tr '_' '-').yaml"
env=$(echo "$i" | cut -f1 -d"=")
value=$(echo "${!env}")
if [ ! -f $SSL_CRTS_DIR/$name ]; then
echo "$value" > /root/.kube/config
fi
done
kubectl ${@}
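The first loop is the entire templating mechanism: each RKE_*-named environment variable is replaced, as a literal token, everywhere it appears in the shipped manifests, which is why the YAML above embeds bare names like RKE_DNS_SERVER and RKE_CLUSTER_CIDR. The second loop writes any KUBECFG_* value out as a kubeconfig. A Go rendering of the substitution, for illustration only:

package main

import (
	"fmt"
	"os"
	"strings"
)

// renderTemplate mirrors the sed loop above: replace each RKE_* variable
// name found in the environment with its value, everywhere in the manifest.
func renderTemplate(manifest string) string {
	for _, kv := range os.Environ() {
		parts := strings.SplitN(kv, "=", 2)
		if strings.HasPrefix(parts[0], "RKE_") {
			manifest = strings.Replace(manifest, parts[0], parts[1], -1)
		}
	}
	return manifest
}

func main() {
	os.Setenv("RKE_CLUSTER_CIDR", "10.233.64.0/18") // sample value
	fmt.Println(renderTemplate(`"Network": "RKE_CLUSTER_CIDR"`))
	// Output: "Network": "10.233.64.0/18"
}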

package/kubectl/network/flannel.yaml

@@ -0,0 +1,122 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: "kube-system"
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name":"cbr0",
"cniVersion":"0.3.1",
"plugins":[
{
"type":"flannel",
"delegate":{
"forceAddress":true,
"isDefaultGateway":true
}
},
{
"type":"portmap",
"capabilities":{
"portMappings":true
}
}
]
}
net-conf.json: |
{
"Network": "RKE_CLUSTER_CIDR",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: "kube-system"
labels:
tier: node
k8s-app: flannel
spec:
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 300m
memory: 500M
requests:
cpu: 150m
memory: 64M
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel-cni:v0.2.0
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
rollingUpdate:
maxUnavailable: 20%
type: RollingUpdate


@@ -57,4 +57,5 @@ const (
KubeAdminCommonName = "kube-admin"
KubeAdminOrganizationName = "system:masters"
KubeAdminConfigPath = "admin.config"
KubeAdminConfigENVName = "KUBECFG_ADMIN"
)


@@ -26,7 +26,7 @@ func buildKubeletConfig(host hosts.Host, kubeletService Kubelet, isMaster bool)
"--cgroup-driver=cgroupfs",
"--cgroups-per-qos=True",
"--enforce-node-allocatable=",
"--cluster-dns=10.233.0.3",
"--cluster-dns=" + kubeletService.ClusterDnsServer,
"--network-plugin=cni",
"--cni-conf-dir=/etc/cni/net.d",
"--cni-bin-dir=/opt/cni/bin",


@@ -28,6 +28,7 @@ type Kubelet struct {
Image string `yaml:"image"`
ClusterDomain string `yaml:"cluster_domain"`
InfraContainerImage string `yaml:"infra_container_image"`
ClusterDnsServer string `yaml:"cluster_dns_server"`
}
type Kubeproxy struct {