1
0
mirror of https://github.com/rancher/rke.git synced 2025-09-19 18:30:38 +00:00

Enable RBAC and needed addons/network plugin configuration

This commit is contained in:
moelsayed
2017-12-14 23:56:19 +02:00
parent ad62b084f5
commit 8ea65915d3
22 changed files with 682 additions and 88 deletions

View File

@@ -11,6 +11,7 @@ spec:
name: pi
spec:
hostNetwork: true
serviceAccountName: rke-job-deployer
nodeName: ` + nodeName + `
containers:
- name: ` + addonName + `-pod

35
authz/authz.go Normal file
View File

@@ -0,0 +1,35 @@
package authz
import (
"github.com/rancher/rke/k8s"
"github.com/sirupsen/logrus"
)
// ApplyJobDeployerServiceAccount sets up the rke-job-deployer ServiceAccount
// and its ClusterRoleBinding, which RKE's addon deployment jobs run under.
// It returns the first error from building the client or applying either
// resource.
func ApplyJobDeployerServiceAccount(kubeConfigPath string) error {
	logrus.Infof("[authz] Creating rke-job-deployer ServiceAccount")
	client, err := k8s.NewClient(kubeConfigPath)
	if err != nil {
		return err
	}
	// Apply the binding first, then the account, exactly as before; stop at
	// the first failure.
	err = k8s.UpdateClusterRoleBindingFromYaml(client, jobDeployerClusterRoleBinding)
	if err == nil {
		err = k8s.UpdateServiceAccountFromYaml(client, jobDeployerServiceAccount)
	}
	if err != nil {
		return err
	}
	logrus.Infof("[authz] rke-job-deployer ServiceAccount created successfully")
	return nil
}
// ApplySystemNodeClusterRoleBinding ensures the system:node
// ClusterRoleBinding exists in the cluster reachable through the given
// kubeconfig.
func ApplySystemNodeClusterRoleBinding(kubeConfigPath string) error {
	logrus.Infof("[authz] Creating system:node ClusterRoleBinding")
	client, err := k8s.NewClient(kubeConfigPath)
	if err == nil {
		err = k8s.UpdateClusterRoleBindingFromYaml(client, systemNodeClusterRoleBinding)
	}
	if err != nil {
		return err
	}
	logrus.Infof("[authz] system:node ClusterRoleBinding created successfully")
	return nil
}

42
authz/manifests.go Normal file
View File

@@ -0,0 +1,42 @@
package authz
const (
	// systemNodeClusterRoleBinding binds the system:nodes group to the
	// built-in system:node ClusterRole; it carries the rbac-defaults
	// bootstrap label and sets autoupdate to "false".
	systemNodeClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "false"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io`

	// jobDeployerServiceAccount is the ServiceAccount in kube-system that
	// RKE's addon deployment jobs run as.
	jobDeployerServiceAccount = `
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rke-job-deployer
  namespace: kube-system`

	// jobDeployerClusterRoleBinding grants the cluster-admin ClusterRole to
	// the rke-job-deployer ServiceAccount.
	jobDeployerClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: job-deployer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  namespace: kube-system
  name: rke-job-deployer`
)

View File

@@ -12,7 +12,7 @@ auth:
# weave
#
# If you are using calico on AWS, use the network plugin config option:
# 'cloud_provider: aws'
# 'calico_cloud_provider: aws'
network:
plugin: flannel
options:
@@ -22,6 +22,11 @@ network:
ssh_key_path: ~/.ssh/test
ignore_docker_version: false
# Kubernetes authorization mode, currently only `rbac` is supported
authorization:
mode: rbac
options:
nodes:
- address: 1.1.1.1
user: ubuntu

View File

@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
"github.com/rancher/rke/authz"
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
@@ -58,10 +59,15 @@ func (c *Cluster) DeployClusterPlanes() error {
err = services.RunControlPlane(c.ControlPlaneHosts,
c.EtcdHosts,
c.Services,
c.SystemImages[ServiceSidekickImage])
c.SystemImages[ServiceSidekickImage],
c.Authorization.Mode)
if err != nil {
return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
}
err = c.ApplyRBACResources()
if err != nil {
return fmt.Errorf("[auths] Failed to apply RBAC resources: %v", err)
}
err = services.RunWorkerPlane(c.ControlPlaneHosts,
c.WorkerHosts,
c.Services,
@@ -239,3 +245,15 @@ func getLocalAdminConfigWithNewAddress(localConfigPath, cpAddress string) string
string(config.CertData),
string(config.KeyData))
}
// ApplyRBACResources applies the ServiceAccount needed to run addon
// deployment jobs and, when the cluster's authorization mode is rbac, the
// system:node ClusterRoleBinding as well.
func (c *Cluster) ApplyRBACResources() error {
	err := authz.ApplyJobDeployerServiceAccount(c.LocalKubeConfigPath)
	if err != nil {
		return fmt.Errorf("Failed to apply the ServiceAccount needed for job execution: %v", err)
	}
	// The system:node binding is only required under RBAC authorization.
	if c.Authorization.Mode != services.RBACAuthorizationMode {
		return nil
	}
	if err = authz.ApplySystemNodeClusterRoleBinding(c.LocalKubeConfigPath); err != nil {
		return fmt.Errorf("Failed to apply the ClusterRoleBinding needed for node authorization: %v", err)
	}
	return nil
}

View File

@@ -9,6 +9,8 @@ const (
DefaultClusterDomain = "cluster.local"
DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"
DefaultDockerSockPath = "/var/run/docker.sock"
DefaultAuthStrategy = "x509"
DefaultNetworkPlugin = "flannel"
@@ -20,6 +22,9 @@ const (
DefaultCertDownloaderImage = "rancher/rke-cert-deployer:0.1.0"
DefaultServiceSidekickImage = "rancher/rke-service-sidekick:0.1.0"
DefaultEtcdImage = "quay.io/coreos/etcd:latest"
DefaultK8sImage = "rancher/k8s:v1.8.3-rancher2"
DefaultFlannelImage = "quay.io/coreos/flannel:v0.9.1"
DefaultFlannelCNIImage = "quay.io/coreos/flannel-cni:v0.2.0"

View File

@@ -56,6 +56,7 @@ func (c *Cluster) doFlannelDeploy() error {
network.FlannelImage: c.Network.Options[FlannelImage],
network.FlannelCNIImage: c.Network.Options[FlannelCNIImage],
network.FlannelIface: c.Network.Options[FlannelIface],
network.RBACConfig: c.Authorization.Mode,
}
pluginYaml := network.GetFlannelManifest(flannelConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
@@ -75,6 +76,7 @@ func (c *Cluster) doCalicoDeploy() error {
network.ControllersImage: c.Network.Options[CalicoControllersImages],
network.CalicoctlImage: c.Network.Options[CalicoctlImage],
network.CloudProvider: c.Network.Options[CalicoCloudProvider],
network.RBACConfig: c.Authorization.Mode,
}
pluginYaml := network.GetCalicoManifest(calicoConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
@@ -90,13 +92,20 @@ func (c *Cluster) doCanalDeploy() error {
network.NodeImage: c.Network.Options[CanalNodeImage],
network.CNIImage: c.Network.Options[CanalCNIImage],
network.FlannelImage: c.Network.Options[CanalFlannelImage],
network.RBACConfig: c.Authorization.Mode,
}
pluginYaml := network.GetCanalManifest(canalConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}
func (c *Cluster) doWeaveDeploy() error {
pluginYaml := network.GetWeaveManifest(c.ClusterCIDR, c.Network.Options[WeaveImage], c.Network.Options[WeaveCNIImage])
weaveConfig := map[string]string{
network.ClusterCIDR: c.ClusterCIDR,
network.WeaveImage: c.Network.Options[WeaveImage],
network.WeaveCNIImage: c.Network.Options[WeaveCNIImage],
network.RBACConfig: c.Authorization.Mode,
}
pluginYaml := network.GetWeaveManifest(weaveConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}

View File

@@ -128,11 +128,18 @@ func clusterConfig(ctx *cli.Context) error {
cluster.Network = *networkConfig
// Get Authentication Config
authConfig, err := getAuthConfig(reader)
authnConfig, err := getAuthnConfig(reader)
if err != nil {
return err
}
cluster.Authentication = *authConfig
cluster.Authentication = *authnConfig
// Get Authorization config
authzConfig, err := getAuthzConfig(reader)
if err != nil {
return err
}
cluster.Authorization = *authzConfig
// Get Services Config
serviceConfig, err := getServiceConfig(reader)
@@ -207,7 +214,7 @@ func getHostConfig(reader *bufio.Reader, index int) (*v3.RKEConfigNode, error) {
}
host.InternalAddress = internalAddress
dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socket path on host (%s)", address), "/var/run/docker.sock")
dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socket path on host (%s)", address), cluster.DefaultDockerSockPath)
if err != nil {
return nil, err
}
@@ -224,13 +231,13 @@ func getServiceConfig(reader *bufio.Reader) (*v3.RKEConfigServices, error) {
servicesConfig.Kubelet = v3.KubeletService{}
servicesConfig.Kubeproxy = v3.KubeproxyService{}
etcdImage, err := getConfig(reader, "Etcd Docker Image", "quay.io/coreos/etcd:latest")
etcdImage, err := getConfig(reader, "Etcd Docker Image", cluster.DefaultEtcdImage)
if err != nil {
return nil, err
}
servicesConfig.Etcd.Image = etcdImage
kubeImage, err := getConfig(reader, "Kubernetes Docker image", "rancher/k8s:v1.8.3-rancher2")
kubeImage, err := getConfig(reader, "Kubernetes Docker image", cluster.DefaultK8sImage)
if err != nil {
return nil, err
}
@@ -240,32 +247,32 @@ func getServiceConfig(reader *bufio.Reader) (*v3.RKEConfigServices, error) {
servicesConfig.Kubelet.Image = kubeImage
servicesConfig.Kubeproxy.Image = kubeImage
clusterDomain, err := getConfig(reader, "Cluster domain", "cluster.local")
clusterDomain, err := getConfig(reader, "Cluster domain", cluster.DefaultClusterDomain)
if err != nil {
return nil, err
}
servicesConfig.Kubelet.ClusterDomain = clusterDomain
serviceClusterIPRange, err := getConfig(reader, "Service Cluster IP Range", "10.233.0.0/18")
serviceClusterIPRange, err := getConfig(reader, "Service Cluster IP Range", cluster.DefaultServiceClusterIPRange)
if err != nil {
return nil, err
}
servicesConfig.KubeAPI.ServiceClusterIPRange = serviceClusterIPRange
servicesConfig.KubeController.ServiceClusterIPRange = serviceClusterIPRange
clusterNetworkCidr, err := getConfig(reader, "Cluster Network CIDR", "10.233.64.0/18")
clusterNetworkCidr, err := getConfig(reader, "Cluster Network CIDR", cluster.DefaultClusterCIDR)
if err != nil {
return nil, err
}
servicesConfig.KubeController.ClusterCIDR = clusterNetworkCidr
clusterDNSServiceIP, err := getConfig(reader, "Cluster DNS Service IP", "10.233.0.3")
clusterDNSServiceIP, err := getConfig(reader, "Cluster DNS Service IP", cluster.DefaultClusterDNSService)
if err != nil {
return nil, err
}
servicesConfig.Kubelet.ClusterDNSServer = clusterDNSServiceIP
infraPodImage, err := getConfig(reader, "Infra Container image", "gcr.io/google_containers/pause-amd64:3.0")
infraPodImage, err := getConfig(reader, "Infra Container image", cluster.DefaultInfraContainerImage)
if err != nil {
return nil, err
}
@@ -273,21 +280,31 @@ func getServiceConfig(reader *bufio.Reader) (*v3.RKEConfigServices, error) {
return &servicesConfig, nil
}
func getAuthConfig(reader *bufio.Reader) (*v3.AuthnConfig, error) {
authConfig := v3.AuthnConfig{}
func getAuthnConfig(reader *bufio.Reader) (*v3.AuthnConfig, error) {
authnConfig := v3.AuthnConfig{}
authType, err := getConfig(reader, "Authentication Strategy", "x509")
authnType, err := getConfig(reader, "Authentication Strategy", cluster.DefaultAuthStrategy)
if err != nil {
return nil, err
}
authConfig.Strategy = authType
return &authConfig, nil
authnConfig.Strategy = authnType
return &authnConfig, nil
}
// getAuthzConfig interactively prompts for the cluster authorization mode
// (empty default) and returns the resulting AuthzConfig.
func getAuthzConfig(reader *bufio.Reader) (*v3.AuthzConfig, error) {
	mode, err := getConfig(reader, "Authorization Mode", "")
	if err != nil {
		return nil, err
	}
	return &v3.AuthzConfig{Mode: mode}, nil
}
func getNetworkConfig(reader *bufio.Reader) (*v3.NetworkConfig, error) {
networkConfig := v3.NetworkConfig{}
networkPlugin, err := getConfig(reader, "Network Plugin Type", "flannel")
networkPlugin, err := getConfig(reader, "Network Plugin Type", cluster.DefaultNetworkCloudProvider)
if err != nil {
return nil, err
}

34
k8s/clusterrole.go Normal file
View File

@@ -0,0 +1,34 @@
package k8s
import (
"bytes"
"time"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes"
)
// UpdateClusterRoleBindingFromYaml decodes a ClusterRoleBinding from the
// given YAML document and applies it to the cluster: it tries Create first
// and falls back to Update when the binding already exists. The apply is
// attempted up to 6 times with a 5 second back-off between attempts, to ride
// out an API server that is still coming up. Returns the last apply error,
// or a decode error immediately.
func UpdateClusterRoleBindingFromYaml(k8sClient *kubernetes.Clientset, clusterRoleBindingYaml string) error {
	clusterRoleBinding := rbacv1.ClusterRoleBinding{}
	decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(clusterRoleBindingYaml)))
	if err := decoder.Decode(&clusterRoleBinding); err != nil {
		return err
	}
	var err error
	for retries := 0; retries <= 5; retries++ {
		if retries > 0 {
			// Back off between attempts only; the original slept even after
			// the final failure, adding 5s of useless latency to the error path.
			time.Sleep(time.Second * 5)
		}
		if _, err = k8sClient.RbacV1().ClusterRoleBindings().Create(&clusterRoleBinding); err == nil {
			return nil
		}
		if apierrors.IsAlreadyExists(err) {
			if _, err = k8sClient.RbacV1().ClusterRoleBindings().Update(&clusterRoleBinding); err == nil {
				return nil
			}
		}
	}
	return err
}

35
k8s/serviceaccount.go Normal file
View File

@@ -0,0 +1,35 @@
package k8s
import (
"bytes"
"time"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes"
)
// UpdateServiceAccountFromYaml decodes a ServiceAccount from the given YAML
// document and applies it in the kube-system namespace: Create first, Update
// when it already exists. Mirrors UpdateClusterRoleBindingFromYaml: up to 6
// attempts with a 5 second back-off between attempts (not after the last
// one). Returns the last apply error, or a decode error immediately.
func UpdateServiceAccountFromYaml(k8sClient *kubernetes.Clientset, serviceAccountYaml string) error {
	serviceAccount := v1.ServiceAccount{}
	decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(serviceAccountYaml)))
	if err := decoder.Decode(&serviceAccount); err != nil {
		return err
	}
	var err error
	for retries := 0; retries <= 5; retries++ {
		if retries > 0 {
			// Sleep only between attempts; avoid the pointless final 5s sleep
			// before returning the error.
			time.Sleep(time.Second * 5)
		}
		if _, err = k8sClient.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(&serviceAccount); err == nil {
			return nil
		}
		if apierrors.IsAlreadyExists(err) {
			if _, err = k8sClient.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Update(&serviceAccount); err == nil {
				return nil
			}
		}
	}
	return err
}

View File

@@ -1,56 +1,21 @@
package network
import "github.com/rancher/rke/services"
func GetCalicoManifest(calicoConfig map[string]string) string {
awsIPPool := ""
if calicoConfig[CloudProvider] == AWSCloudProvider {
awsIPPool = `
---
kind: ConfigMap
apiVersion: v1
metadata:
name: aws-ippool
namespace: kube-system
data:
aws-ippool: |-
apiVersion: v1
kind: ipPool
metadata:
cidr: ` + calicoConfig[ClusterCIDR] + `
spec:
nat-outgoing: true
---
apiVersion: v1
kind: Pod
metadata:
name: calicoctl
namespace: kube-system
spec:
hostNetwork: true
restartPolicy: OnFailure
containers:
- name: calicoctl
image: ` + calicoConfig[CalicoctlImage] + `
command: ["/bin/sh", "-c", "calicoctl apply -f aws-ippool.yaml"]
env:
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
volumeMounts:
- name: ippool-config
mountPath: /root/
volumes:
- name: ippool-config
configMap:
name: aws-ippool
items:
- key: aws-ippool
path: aws-ippool.yaml
`
awsIPPool = getCalicoAWSIPPoolManifest(calicoConfig)
}
return `# Calico Version master
rbacConfig := ""
if calicoConfig[RBACConfig] == services.RBACAuthorizationMode {
rbacConfig = getCalicoRBACManifest()
}
return rbacConfig + `
---
# Calico Version master
# https://docs.projectcalico.org/master/releases#master
# This manifest includes the following component versions:
# calico/node:master
@@ -445,3 +410,134 @@ metadata:
` + awsIPPool + `
`
}
// getCalicoAWSIPPoolManifest returns the extra manifest appended when Calico
// runs with the AWS cloud provider: an aws-ippool ConfigMap holding an ipPool
// (cluster CIDR, nat-outgoing enabled) plus a one-shot calicoctl pod that
// applies it via `calicoctl apply`.
// Keys read from calicoConfig: ClusterCIDR, CalicoctlImage.
func getCalicoAWSIPPoolManifest(calicoConfig map[string]string) string {
	return `
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: aws-ippool
  namespace: kube-system
data:
  aws-ippool: |-
    apiVersion: v1
    kind: ipPool
    metadata:
      cidr: ` + calicoConfig[ClusterCIDR] + `
    spec:
      nat-outgoing: true
---
apiVersion: v1
kind: Pod
metadata:
  name: calicoctl
  namespace: kube-system
spec:
  hostNetwork: true
  restartPolicy: OnFailure
  containers:
  - name: calicoctl
    image: ` + calicoConfig[CalicoctlImage] + `
    command: ["/bin/sh", "-c", "calicoctl apply -f aws-ippool.yaml"]
    env:
    - name: ETCD_ENDPOINTS
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_endpoints
    volumeMounts:
    - name: ippool-config
      mountPath: /root/
  volumes:
  - name: ippool-config
    configMap:
      name: aws-ippool
      items:
      - key: aws-ippool
        path: aws-ippool.yaml
`
}
// getCalicoRBACManifest returns the RBAC resources for Calico: ClusterRoles,
// ClusterRoleBindings and ServiceAccounts for calico-cni-plugin and
// calico-kube-controllers. It is prepended to the Calico manifest only when
// the cluster authorization mode is rbac.
func getCalicoRBACManifest() string {
	return `
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
  - apiGroups:
    - ""
    - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
    verbs:
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
`
}

View File

@@ -1,7 +1,15 @@
package network
import "github.com/rancher/rke/services"
func GetCanalManifest(canalConfig map[string]string) string {
return `# This ConfigMap can be used to configure a self-hosted Canal installation.
rbacConfig := ""
if canalConfig[RBACConfig] == services.RBACAuthorizationMode {
rbacConfig = getCanalRBACManifest()
}
return rbacConfig + `
---
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
@@ -328,3 +336,123 @@ metadata:
namespace: kube-system
`
}
// getCanalRBACManifest returns the RBAC resources for Canal: the calico and
// flannel ClusterRoles plus the bindings that attach both roles to the canal
// ServiceAccount in kube-system. It is prepended to the Canal manifest only
// when the cluster authorization mode is rbac.
func getCanalRBACManifest() string {
	return `
# Calico Roles
# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
rules:
  - apiGroups: [""]
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - update
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - bgppeers
      - globalbgpconfigs
      - ippools
      - globalnetworkpolicies
    verbs:
      - create
      - get
      - list
      - update
      - watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: canal-flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: canal-calico
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: canal
  namespace: kube-system
`
}

View File

@@ -1,13 +1,21 @@
package network
import "fmt"
import (
"fmt"
"github.com/rancher/rke/services"
)
func GetFlannelManifest(flannelConfig map[string]string) string {
var extraArgs string
if len(flannelConfig[FlannelIface]) > 0 {
extraArgs = fmt.Sprintf(",--iface=%s", flannelConfig[FlannelIface])
}
return `
rbacConfig := ""
if flannelConfig[RBACConfig] == services.RBACAuthorizationMode {
rbacConfig = getFlannelRBACManifest()
}
return rbacConfig + `
---
kind: ConfigMap
apiVersion: v1
@@ -61,6 +69,7 @@ spec:
tier: node
k8s-app: flannel
spec:
serviceAccountName: flannel
containers:
- name: kube-flannel
image: ` + flannelConfig[FlannelImage] + `
@@ -129,5 +138,53 @@ spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 20%
type: RollingUpdate`
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system`
}
// getFlannelRBACManifest returns the ClusterRole and ClusterRoleBinding that
// grant the flannel ServiceAccount (kube-system) read access to pods and
// nodes and patch access to node status. It is prepended to the flannel
// manifest only when the cluster authorization mode is rbac.
func getFlannelRBACManifest() string {
	return `
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch`
}

View File

@@ -17,4 +17,7 @@ const (
FlannelIface = "flannelIface"
CloudProvider = "cloudprovider"
AWSCloudProvider = "aws"
RBACConfig = "rbacConfig"
WeaveImage = "weaveImage"
WeaveCNIImage = "weaveCNIImage"
)

View File

@@ -1,7 +1,14 @@
package network
func GetWeaveManifest(clusterCIDR, image, cniImage string) string {
return `# This ConfigMap can be used to configure a self-hosted Weave Net installation.
import "github.com/rancher/rke/services"
func GetWeaveManifest(weaveConfig map[string]string) string {
rbacConfig := ""
if weaveConfig[RBACConfig] == services.RBACAuthorizationMode {
rbacConfig = getWeaveRBACManifest()
}
return `
# This ConfigMap can be used to configure a self-hosted Weave Net installation.
apiVersion: v1
kind: List
items:
@@ -34,8 +41,8 @@ items:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: "` + clusterCIDR + `"
image: ` + image + `
value: "` + weaveConfig[ClusterCIDR] + `"
image: ` + weaveConfig[WeaveImage] + `
livenessProbe:
httpGet:
host: 127.0.0.1
@@ -70,7 +77,7 @@ items:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ` + cniImage + `
image: ` + weaveConfig[WeaveCNIImage] + `
resources:
requests:
cpu: 10m
@@ -112,5 +119,100 @@ items:
path: /run/xtables.lock
updateStrategy:
type: RollingUpdate
`
` + rbacConfig
}
// getWeaveRBACManifest returns the RBAC resources for Weave Net: the
// weave-net ServiceAccount, a ClusterRole/ClusterRoleBinding for pod,
// namespace, node and networkpolicy reads, and a namespaced Role/RoleBinding
// for managing its ConfigMap in kube-system. It is appended to the Weave
// manifest only when the cluster authorization mode is rbac.
func getWeaveRBACManifest() string {
	return `
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: weave-net
  labels:
    name: weave-net
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: weave-net
  labels:
    name: weave-net
rules:
  - apiGroups:
      - ''
    resources:
      - pods
      - namespaces
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: weave-net
  labels:
    name: weave-net
roleRef:
  kind: ClusterRole
  name: weave-net
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: weave-net
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: weave-net
  labels:
    name: weave-net
  namespace: kube-system
rules:
  - apiGroups:
      - ''
    resourceNames:
      - weave-net
    resources:
      - configmaps
    verbs:
      - get
      - update
  - apiGroups:
      - ''
    resources:
      - configmaps
    verbs:
      - create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: weave-net
  labels:
    name: weave-net
  namespace: kube-system
roleRef:
  kind: Role
  name: weave-net
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: weave-net
    namespace: kube-system`
}

View File

@@ -45,7 +45,7 @@ const (
KubeProxyConfigPath = "/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml"
KubeNodeName = "kube-node"
KubeNodeCommonName = "kube-node"
KubeNodeCommonName = "system:node"
KubeNodeOrganizationName = "system:nodes"
KubeNodeCertENVName = "KUBE_NODE"
KubeNodeKeyENVName = "KUBE_NODE_KEY"

View File

@@ -6,7 +6,7 @@ import (
"github.com/sirupsen/logrus"
)
func RunControlPlane(controlHosts, etcdHosts []*hosts.Host, controlServices v3.RKEConfigServices, sidekickImage string) error {
func RunControlPlane(controlHosts, etcdHosts []*hosts.Host, controlServices v3.RKEConfigServices, sidekickImage, authorizationMode string) error {
logrus.Infof("[%s] Building up Controller Plane..", ControlRole)
for _, host := range controlHosts {
@@ -20,12 +20,12 @@ func RunControlPlane(controlHosts, etcdHosts []*hosts.Host, controlServices v3.R
return err
}
// run kubeapi
err := runKubeAPI(host, etcdHosts, controlServices.KubeAPI)
err := runKubeAPI(host, etcdHosts, controlServices.KubeAPI, authorizationMode)
if err != nil {
return err
}
// run kubecontroller
err = runKubeController(host, controlServices.KubeController)
err = runKubeController(host, controlServices.KubeController, authorizationMode)
if err != nil {
return err
}

View File

@@ -11,9 +11,9 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubeAPI(host *hosts.Host, etcdHosts []*hosts.Host, kubeAPIService v3.KubeAPIService) error {
func runKubeAPI(host *hosts.Host, etcdHosts []*hosts.Host, kubeAPIService v3.KubeAPIService, authorizationMode string) error {
etcdConnString := GetEtcdConnString(etcdHosts)
imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString, authorizationMode)
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole)
}
@@ -21,7 +21,7 @@ func removeKubeAPI(host *hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.Address)
}
func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v3.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v3.KubeAPIService, etcdConnString, authorizationMode string) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeAPIService.Image,
Entrypoint: []string{"/opt/rke/entrypoint.sh",
@@ -44,6 +44,9 @@ func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v3.KubeAPIService, etcd
"--tls-private-key-file=" + pki.KubeAPIKeyPath,
"--service-account-key-file=" + pki.KubeAPIKeyPath},
}
if authorizationMode == RBACAuthorizationMode {
imageCfg.Cmd = append(imageCfg.Cmd, "--authorization-mode=RBAC")
}
hostCfg := &container.HostConfig{
VolumesFrom: []string{
SidekickContainerName,

View File

@@ -56,7 +56,7 @@ func TestKubeAPIConfig(t *testing.T) {
etcdConnString := GetEtcdConnString(etcdHosts)
assertEqual(t, etcdConnString, TestEtcdConnString, "")
imageCfg, hostCfg := buildKubeAPIConfig(cpHost, kubeAPIService, etcdConnString)
imageCfg, hostCfg := buildKubeAPIConfig(cpHost, kubeAPIService, etcdConnString, "")
// Test image and host config
assertEqual(t, isStringInSlice(TestInsecureBindAddress, imageCfg.Entrypoint), true,
fmt.Sprintf("Failed to find [%s] in Entrypoint of KubeAPI", TestInsecureBindAddress))

View File

@@ -10,8 +10,8 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubeController(host *hosts.Host, kubeControllerService v3.KubeControllerService) error {
imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
func runKubeController(host *hosts.Host, kubeControllerService v3.KubeControllerService, authorizationMode string) error {
imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService, authorizationMode)
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole)
}
@@ -19,7 +19,7 @@ func removeKubeController(host *hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.Address)
}
func buildKubeControllerConfig(kubeControllerService v3.KubeControllerService) (*container.Config, *container.HostConfig) {
func buildKubeControllerConfig(kubeControllerService v3.KubeControllerService, authorizationMode string) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: kubeControllerService.Image,
Entrypoint: []string{"/opt/rke/entrypoint.sh",
@@ -39,6 +39,9 @@ func buildKubeControllerConfig(kubeControllerService v3.KubeControllerService) (
"--root-ca-file=" + pki.CACertPath,
},
}
if authorizationMode == RBACAuthorizationMode {
imageCfg.Cmd = append(imageCfg.Cmd, "--use-service-account-credentials=true")
}
hostCfg := &container.HostConfig{
VolumesFrom: []string{
SidekickContainerName,

View File

@@ -25,7 +25,7 @@ func TestKubeControllerConfig(t *testing.T) {
kubeControllerService.ServiceClusterIPRange = TestKubeControllerServiceClusterIPRange
kubeControllerService.ExtraArgs = map[string]string{"foo": "bar"}
imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService, "")
// Test image and host config
assertEqual(t, isStringInSlice(TestClusterCidrPrefix+TestKubeControllerClusterCidr, imageCfg.Entrypoint), true,
fmt.Sprintf("Failed to find [%s] in KubeController Command", TestClusterCidrPrefix+TestKubeControllerClusterCidr))

View File

@@ -16,6 +16,7 @@ const (
WorkerRole = "worker"
SidekickServiceName = "sidekick"
RBACAuthorizationMode = "rbac"
KubeAPIContainerName = "kube-api"
KubeletContainerName = "kubelet"