Move some code from apiclient.go to the dedicated apiconfig phase package. Add constants and somewhat refactor the RBAC code as well
commit 79515ec880
parent 8ffada6699
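
In short: the three per-rule RBAC calls that init.go previously made directly (CreateBootstrapRBACClusterRole, CreateKubeDNSRBACClusterRole, CreateKubeProxyClusterRoleBinding) are collapsed into phase-level entry points, and the master-node label/taint code moves out of the master package. A condensed sketch of the resulting flow in Init.Run, pieced together from the init.go hunks below (error handling elided; not a verbatim excerpt):

	// Condensed from the init.go hunks in this commit.
	apiconfigphase.UpdateMasterRoleLabelsAndTaints(client) // was kubemaster.UpdateMasterRoleLabelsAndTaints(client, false)

	// PHASE 4: Set up various things in the API
	apiconfigphase.CreateServiceAccounts(client) // kube-dns and kube-proxy ServiceAccounts
	if i.cfg.AuthorizationMode == kubeadmconstants.AuthzModeRBAC {
		apiconfigphase.CreateRBACRules(client) // ClusterRoles and ClusterRoleBindings in one call
	}

	// PHASE 5: Deploy essential addons
	kubemaster.CreateEssentialAddons(i.cfg, client)
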
@@ -33,7 +33,7 @@ import (
 	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
 	kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
-	"k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
+	apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
 	certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
 	kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
 	"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
@@ -210,7 +210,7 @@ func (i *Init) Run(out io.Writer) error {
 		}
 	}

-	// Phase 3: Bootstrap the control plane
+	// PHASE 3: Bootstrap the control plane
 	if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil {
 		return err
 	}
@@ -220,38 +220,10 @@ func (i *Init) Run(out io.Writer) error {
 		return err
 	}

-	if i.cfg.AuthorizationMode == kubeadmconstants.AuthzModeRBAC {
-		err = apiconfig.CreateBootstrapRBACClusterRole(client)
-		if err != nil {
-			return err
-		}
-
-		err = apiconfig.CreateKubeDNSRBACClusterRole(client)
-		if err != nil {
-			return err
-		}
-
-		// TODO: remove this when https://github.com/kubernetes/kubeadm/issues/114 is fixed
-		err = apiconfig.CreateKubeProxyClusterRoleBinding(client)
-		if err != nil {
-			return err
-		}
-	}
-
-	if err := kubemaster.UpdateMasterRoleLabelsAndTaints(client, false); err != nil {
+	if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil {
 		return err
 	}

-	if i.cfg.Discovery.Token != nil {
-		fmt.Printf("[token-discovery] Using token: %s\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
-		if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client); err != nil {
-			return err
-		}
-		if err := kubeadmutil.UpdateOrCreateToken(client, i.cfg.Discovery.Token, kubeadmutil.DefaultTokenDuration); err != nil {
-			return err
-		}
-	}
-
 	// Is deployment type self-hosted?
 	if i.selfHosted {
 		// Temporary control plane is up, now we create our self hosted control
@@ -262,6 +234,31 @@ func (i *Init) Run(out io.Writer) error {
 		}
 	}

+	// PHASE 4: Set up various things in the API
+	// Create the necessary ServiceAccounts
+	err = apiconfigphase.CreateServiceAccounts(client)
+	if err != nil {
+		return err
+	}
+
+	if i.cfg.AuthorizationMode == kubeadmconstants.AuthzModeRBAC {
+		err = apiconfigphase.CreateRBACRules(client)
+		if err != nil {
+			return err
+		}
+	}
+
+	if i.cfg.Discovery.Token != nil {
+		fmt.Printf("[token-discovery] Using token: %s\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
+		if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client); err != nil {
+			return err
+		}
+		if err := kubeadmutil.UpdateOrCreateToken(client, i.cfg.Discovery.Token, kubeadmutil.DefaultTokenDuration); err != nil {
+			return err
+		}
+	}
+
+	// PHASE 5: Deploy essential addons
 	if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
 		return err
 	}
@@ -39,4 +39,8 @@ const (

 	// Important: a "v"-prefix shouldn't exist here; semver doesn't allow that
 	MinimumControlPlaneVersion = "1.6.0-alpha.1"
+
+	// Constants for what we name our ServiceAccounts with limited access to the cluster in case of RBAC
+	KubeDNSServiceAccountName   = "kube-dns"
+	KubeProxyServiceAccountName = "kube-proxy"
 )
@@ -38,7 +38,6 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/util/uuid",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/client-go/tools/clientcmd",
-        "//vendor:k8s.io/client-go/tools/clientcmd/api",
         "//vendor:k8s.io/client-go/util/cert",
     ],
 )
@@ -310,11 +310,7 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse

 	kubeDNSDeployment := NewDeployment(KubeDNS, 1, createKubeDNSPodSpec(cfg))
 	SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)
-	kubeDNSServiceAccount := &v1.ServiceAccount{}
-	kubeDNSServiceAccount.ObjectMeta.Name = KubeDNS
-	if _, err := client.ServiceAccounts(metav1.NamespaceSystem).Create(kubeDNSServiceAccount); err != nil {
-		return fmt.Errorf("failed creating kube-dns service account [%v]", err)
-	}
+
 	if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
 		return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
 	}
@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/kubernetes/cmd/kubeadm/app/images"
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@@ -34,8 +33,11 @@ import (

 const apiCallRetryInterval = 500 * time.Millisecond

-// TODO: This method shouldn't exist as a standalone function but be integrated into CreateClientFromFile
-func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset, error) {
+func CreateClientFromFile(path string) (*clientset.Clientset, error) {
+	adminKubeconfig, err := clientcmd.LoadFromFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
+	}
 	adminClientConfig, err := clientcmd.NewDefaultClientConfig(
 		*adminKubeconfig,
 		&clientcmd.ConfigOverrides{},
@@ -51,14 +53,6 @@ func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset
 	return client, nil
 }

-func CreateClientFromFile(path string) (*clientset.Clientset, error) {
-	adminKubeconfig, err := clientcmd.LoadFromFile(path)
-	if err != nil {
-		return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
-	}
-	return createAPIClient(adminKubeconfig)
-}
-
 func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
 	client, err := CreateClientFromFile(file)
 	if err != nil {
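
With the standalone createAPIClient helper folded in, CreateClientFromFile now both loads the kubeconfig and builds the clientset, and CreateClientAndWaitForAPI keeps wrapping it. A minimal caller sketch (the admin.conf path is illustrative, not taken from this diff):

	// Hypothetical caller; the kubeconfig path is an assumption.
	client, err := kubemaster.CreateClientAndWaitForAPI("/etc/kubernetes/admin.conf")
	if err != nil {
		return err
	}
	// client (*clientset.Clientset) is what the apiconfig phase functions expect.
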
@@ -171,55 +165,6 @@ func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *e
 	}
 }

-// It's safe to do this for alpha, as we don't have HA and there is no way we can get
-// more then one node here (TODO(phase1+) use os.Hostname)
-func findMyself(client *clientset.Clientset) (*v1.Node, error) {
-	nodeList, err := client.Nodes().List(metav1.ListOptions{})
-	if err != nil {
-		return nil, fmt.Errorf("unable to list nodes [%v]", err)
-	}
-	if len(nodeList.Items) < 1 {
-		return nil, fmt.Errorf("no nodes found")
-	}
-	node := &nodeList.Items[0]
-	return node, nil
-}
-
-func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
-	n, err := findMyself(client)
-	if err != nil {
-		return err
-	}
-
-	n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
-
-	if !schedulable {
-		taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
-		n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)
-	}
-
-	if _, err := client.Nodes().Update(n); err != nil {
-		if apierrs.IsConflict(err) {
-			fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
-			time.Sleep(apiCallRetryInterval)
-			attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
-		} else {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
-	// TODO(phase1+) use iterate instead of recursion
-	err := attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
-	if err != nil {
-		return fmt.Errorf("failed to update master node - [%v]", err)
-	}
-	return nil
-}
-
 func SetMasterTaintTolerations(meta *metav1.ObjectMeta) {
 	tolerationsAnnotation, _ := json.Marshal([]v1.Toleration{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
 	if meta.Annotations == nil {
@@ -17,22 +17,11 @@ limitations under the License.
 package master

 import (
-	"fmt"
 	"testing"

-	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-	apiv1 "k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/api/v1"
 )

-func TestCreateClientAndWaitForAPI(t *testing.T) {
-	cfg := &kubeadmapi.MasterConfiguration{
-		Networking: kubeadm.Networking{DNSDomain: "localhost"},
-	}
-	fmt.Println(cfg)
-
-}
-
 func TestStandardLabels(t *testing.T) {
 	var tests = []struct {
 		n string
@@ -90,7 +79,7 @@ func TestNewDaemonSet(t *testing.T) {
 	}

 	for _, rt := range tests {
-		p := apiv1.PodSpec{}
+		p := v1.PodSpec{}
 		actual := NewDaemonSet(rt.dn, p)
 		if actual.Spec.Selector.MatchLabels["k8s-app"] != rt.expected {
 			t.Errorf(
@@ -132,7 +121,7 @@ func TestNewService(t *testing.T) {
 	}

 	for _, rt := range tests {
-		p := apiv1.ServiceSpec{}
+		p := v1.ServiceSpec{}
 		actual := NewService(rt.dn, p)
 		if actual.ObjectMeta.Labels["k8s-app"] != rt.expected {
 			t.Errorf(
@@ -174,7 +163,7 @@ func TestNewDeployment(t *testing.T) {
 	}

 	for _, rt := range tests {
-		p := apiv1.PodSpec{}
+		p := v1.PodSpec{}
 		actual := NewDeployment(rt.dn, 1, p)
 		if actual.Spec.Selector.MatchLabels["k8s-app"] != rt.expected {
 			t.Errorf(
@@ -9,12 +9,18 @@ load(

 go_library(
     name = "go_default_library",
-    srcs = ["clusterroles.go"],
+    srcs = [
+        "clusterroles.go",
+        "setupmaster.go",
+    ],
     tags = ["automanaged"],
     deps = [
+        "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/master:go_default_library",
+        "//pkg/api/v1:go_default_library",
         "//pkg/apis/rbac/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
     ],
 )
@@ -20,38 +20,76 @@ import (
 	"fmt"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/kubernetes/cmd/kubeadm/app/master"
+	"k8s.io/kubernetes/pkg/api/v1"
 	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 )

-// CreateBootstrapRBACClusterRole grants the system:node-bootstrapper role to the group we created the bootstrap credential with
-func CreateBootstrapRBACClusterRole(clientset *clientset.Clientset) error {
-	clusterRoleBinding := rbac.ClusterRoleBinding{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "kubeadm:kubelet-bootstrap",
-		},
-		RoleRef: rbac.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     "system:node-bootstrapper",
-		},
-		Subjects: []rbac.Subject{
-			{Kind: "Group", Name: master.KubeletBootstrapGroup},
-		},
-	}
-	if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
+const (
+	// TODO: This role should eventually be a system:-prefixed, automatically bootstrapped ClusterRole
+
+	// KubeDNSClusterRoleName sets the name for the kube-dns ClusterRole
+	KubeDNSClusterRoleName = "kubeadm:kube-dns"
+	// KubeProxyClusterRoleName sets the name for the kube-proxy ClusterRole
+	KubeProxyClusterRoleName = "system:node-proxier"
+	// NodeBootstrapperClusterRoleName sets the name for the TLS Node Bootstrapper ClusterRole
+	NodeBootstrapperClusterRoleName = "system:node-bootstrapper"
+
+	// Constants
+	clusterRoleKind    = "ClusterRole"
+	serviceAccountKind = "ServiceAccount"
+	rbacAPIGroup       = "rbac.authorization.k8s.io"
+)
+
+// TODO: Are there any unit tests that could be made for this file other than duplicating all values and logic in a separate file?
+
+// CreateRBACRules creates the essential RBAC rules for a minimally set-up cluster
+func CreateRBACRules(clientset *clientset.Clientset) error {
+	// Create the ClusterRoles we need for our RBAC rules
+	if err := CreateClusterRoles(clientset); err != nil {
+		return err
+	}
+	// Create the CreateClusterRoleBindings we need for our RBAC rules
+	if err := CreateClusterRoleBindings(clientset); err != nil {
 		return err
 	}
-	fmt.Println("[apiconfig] Created node bootstrapper RBAC rules")
+
+	fmt.Println("[apiconfig] Created RBAC rules")
 	return nil
 }

-// CreateKubeDNSRBACClusterRole creates the necessary ClusterRole for kube-dns
-func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
+// CreateServiceAccounts creates the necessary serviceaccounts that kubeadm uses/might use.
+func CreateServiceAccounts(clientset *clientset.Clientset) error {
+	serviceAccounts := []v1.ServiceAccount{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      kubeadmconstants.KubeDNSServiceAccountName,
+				Namespace: metav1.NamespaceSystem,
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      kubeadmconstants.KubeProxyServiceAccountName,
+				Namespace: metav1.NamespaceSystem,
+			},
+		},
+	}
+
+	for _, sa := range serviceAccounts {
+		if _, err := clientset.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(&sa); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CreateClusterRoles creates the ClusterRoles that aren't bootstrapped by the apiserver
+func CreateClusterRoles(clientset *clientset.Clientset) error {
+	// TODO: Remove this ClusterRole when it's automatically bootstrapped in the apiserver
 	clusterRole := rbac.ClusterRole{
-		ObjectMeta: metav1.ObjectMeta{Name: "kubeadm:" + master.KubeDNS},
+		ObjectMeta: metav1.ObjectMeta{Name: KubeDNSClusterRoleName},
 		Rules: []rbac.PolicyRule{
 			rbac.NewRule("list", "watch").Groups("").Resources("endpoints", "services").RuleOrDie(),
 			// TODO: remove watch rule when https://github.com/kubernetes/kubernetes/pull/38816 gets merged
@@ -61,53 +99,68 @@ func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
 	if _, err := clientset.Rbac().ClusterRoles().Create(&clusterRole); err != nil {
 		return err
 	}

-	subject := rbac.Subject{
-		Kind:      "ServiceAccount",
-		Name:      master.KubeDNS,
-		Namespace: metav1.NamespaceSystem,
-	}
-
-	clusterRoleBinding := rbac.ClusterRoleBinding{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "kubeadm:" + master.KubeDNS,
-		},
-		RoleRef: rbac.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     clusterRole.Name,
-		},
-		Subjects: []rbac.Subject{subject},
-	}
-	if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
-		return err
-	}
-	fmt.Println("[apiconfig] Created kube-dns RBAC rules")
-
 	return nil
 }

-// CreateKubeProxyClusterRoleBinding grants the system:node-proxier role to the nodes group,
-// since kubelet credentials are used to run the kube-proxy
-// TODO: give the kube-proxy its own credential and stop requiring this
-func CreateKubeProxyClusterRoleBinding(clientset *clientset.Clientset) error {
-	clusterRoleBinding := rbac.ClusterRoleBinding{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "kubeadm:node-proxier",
-		},
-		RoleRef: rbac.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     "system:node-proxier",
-		},
-		Subjects: []rbac.Subject{
-			{Kind: "Group", Name: "system:nodes"},
-		},
-	}
-	if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
-		return err
-	}
-	fmt.Println("[apiconfig] Created kube-proxy RBAC rules")
-
+// CreateClusterRoleBindings creates all necessary bindings between bootstrapped & kubeadm-created ClusterRoles and subjects kubeadm is using
+func CreateClusterRoleBindings(clientset *clientset.Clientset) error {
+	clusterRoleBindings := []rbac.ClusterRoleBinding{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "kubeadm:kubelet-bootstrap",
+			},
+			RoleRef: rbac.RoleRef{
+				APIGroup: rbacAPIGroup,
+				Kind:     clusterRoleKind,
+				Name:     NodeBootstrapperClusterRoleName,
+			},
+			Subjects: []rbac.Subject{
+				{
+					Kind: "Group",
+					Name: master.KubeletBootstrapGroup,
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "kubeadm:kube-dns",
+			},
+			RoleRef: rbac.RoleRef{
+				APIGroup: rbacAPIGroup,
+				Kind:     clusterRoleKind,
+				Name:     KubeDNSClusterRoleName,
+			},
+			Subjects: []rbac.Subject{
+				{
+					Kind:      serviceAccountKind,
+					Name:      kubeadmconstants.KubeDNSServiceAccountName,
+					Namespace: metav1.NamespaceSystem,
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "kubeadm:node-proxier",
+			},
+			RoleRef: rbac.RoleRef{
+				APIGroup: rbacAPIGroup,
+				Kind:     clusterRoleKind,
+				Name:     KubeProxyClusterRoleName,
+			},
+			Subjects: []rbac.Subject{
+				{
+					Kind:      serviceAccountKind,
+					Name:      kubeadmconstants.KubeProxyServiceAccountName,
+					Namespace: metav1.NamespaceSystem,
+				},
+			},
+		},
+	}
+
+	for _, clusterRoleBinding := range clusterRoleBindings {
+		if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
+			return err
+		}
+	}
 	return nil
 }
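
Note that CreateServiceAccounts, CreateClusterRoles and CreateClusterRoleBindings all bubble up any Create error, including AlreadyExists, so running the phase twice against the same cluster fails. A hedged sketch of an idempotent variant (not part of this commit), reusing the apimachinery errors package already vendored for this target:

	// Sketch only: tolerate pre-existing objects so the phase can be re-run.
	if _, err := clientset.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(&sa); err != nil {
		if !apierrs.IsAlreadyExists(err) { // apierrs = "k8s.io/apimachinery/pkg/api/errors"
			return err
		}
	}
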
cmd/kubeadm/app/phases/apiconfig/setupmaster.go (new file, 83 lines)
@@ -0,0 +1,83 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+)
+
+const apiCallRetryInterval = 500 * time.Millisecond
+
+// TODO: Can we think of any unit tests here? Or should this code just be covered through integration/e2e tests?
+
+// It's safe to do this for alpha, as we don't have HA and there is no way we can get
+// more then one node here (TODO(phase1+) use os.Hostname)
+func findMyself(client *clientset.Clientset) (*v1.Node, error) {
+	nodeList, err := client.Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("unable to list nodes [%v]", err)
+	}
+	if len(nodeList.Items) < 1 {
+		return nil, fmt.Errorf("no nodes found")
+	}
+	node := &nodeList.Items[0]
+	return node, nil
+}
+
+func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error {
+	n, err := findMyself(client)
+	if err != nil {
+		return err
+	}
+
+	// TODO: Switch to the new master label defined in https://github.com/kubernetes/kubernetes/pull/39112
+	n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
+
+	// TODO: Use the Taints beta field on the NodeSpec now
+	taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
+	n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)
+
+	// TODO: Use a patch instead of an Update
+	if _, err := client.Nodes().Update(n); err != nil {
+		if apierrs.IsConflict(err) {
+			fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
+			time.Sleep(apiCallRetryInterval)
+			attemptToUpdateMasterRoleLabelsAndTaints(client)
+		} else {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// UpdateMasterRoleLabelsAndTaints taints the master and sets the master label
+func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error {
+	// TODO: Use iterate instead of recursion
+	err := attemptToUpdateMasterRoleLabelsAndTaints(client)
+	if err != nil {
+		return fmt.Errorf("failed to update master node - [%v]", err)
+	}
+	return nil
+}
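
Two things stand out in the moved code: the recursion flagged by the "use iterate instead of recursion" TODO, and the fact that the return value of the recursive attemptToUpdateMasterRoleLabelsAndTaints call is discarded, so a persistent conflict is silently swallowed. A possible iterative rewrite (a sketch against the helpers in this file, not part of the commit; the function name is hypothetical):

	// Sketch: loop instead of recursing, and propagate every error.
	func updateMasterNodeWithRetry(client *clientset.Clientset) error {
		for {
			n, err := findMyself(client)
			if err != nil {
				return err
			}
			n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
			taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
			n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)

			if _, err := client.Nodes().Update(n); err != nil {
				if apierrs.IsConflict(err) {
					fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
					time.Sleep(apiCallRetryInterval)
					continue // re-read the node and retry the update
				}
				return err
			}
			return nil
		}
	}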