Merge pull request #40154 from liggitt/kubeadm-rbac

Automatic merge from submit-queue (batch tested with PRs 36693, 40154, 40170, 39033)

kubeadm: RBAC cleanup

Builds on https://github.com/kubernetes/kubernetes/pull/40153 and includes some follow-up items from https://github.com/kubernetes/kubernetes/pull/39846.
Authored by Kubernetes Submit Queue on 2017-01-20 09:18:51 -08:00; committed by GitHub
commit 49cb10c843
4 changed files with 25 additions and 41 deletions

@@ -73,11 +73,10 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
 		cs, err := client.ComponentStatuses().List(v1.ListOptions{})
 		if err != nil {
 			if apierrs.IsForbidden(err) {
-				fmt.Print("\r[apiclient] Waiting for the API server to create RBAC policies")
+				fmt.Println("[apiclient] Waiting for API server authorization")
 			}
 			return false, nil
 		}
-		fmt.Println("\n[apiclient] RBAC policies created")
 		// TODO(phase2) must revisit this when we implement HA
 		if len(cs.Items) < 3 {
 			fmt.Println("[apiclient] Not all control plane components are ready yet")

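The hunk above only adjusts the progress messages, but it sits inside a retry loop that is easier to follow in isolation. Below is a minimal, self-contained sketch of that polling pattern; the component-status lookup is injected as a function, the forbidden error and the retry interval are stand-ins for `apierrs.IsForbidden` and kubeadm's real interval, and the final message is illustrative only.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errForbidden stands in for an RBAC 403 returned by the API server.
var errForbidden = errors.New("forbidden")

// waitForAPI polls until listing component statuses succeeds and reports at
// least three control plane components, mirroring the loop around the hunk above.
func waitForAPI(list func() (int, error), interval time.Duration) {
	for {
		n, err := list()
		if err != nil {
			if errors.Is(err, errForbidden) {
				// The API server is reachable but has not authorized us yet.
				fmt.Println("[apiclient] Waiting for API server authorization")
			}
			time.Sleep(interval)
			continue
		}
		// TODO(phase2) must revisit this when we implement HA
		if n < 3 {
			fmt.Println("[apiclient] Not all control plane components are ready yet")
			time.Sleep(interval)
			continue
		}
		return
	}
}

func main() {
	calls := 0
	waitForAPI(func() (int, error) {
		// Simulate two forbidden responses before the API becomes usable.
		if calls++; calls <= 2 {
			return 0, errForbidden
		}
		return 3, nil
	}, 10*time.Millisecond)
	fmt.Println("done waiting")
}
```
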
@@ -369,7 +369,7 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
 		"--service-account-private-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
 		"--cluster-signing-cert-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
 		"--cluster-signing-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca-key.pem",
-		"--insecure-experimental-approve-all-kubelet-csrs-for-group=kubeadm:kubelet-bootstrap",
+		"--insecure-experimental-approve-all-kubelet-csrs-for-group="+KubeletBootstrapGroup,
 	)
 	if cfg.CloudProvider != "" {

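The change above is a pure refactor: once the new `KubeletBootstrapGroup` constant (added in the hunk below) is substituted, the controller manager receives exactly the flag it did before. A quick stand-alone check, with the constant's value copied from that hunk:

```go
package main

import "fmt"

// Value copied from the const block added below.
const KubeletBootstrapGroup = "kubeadm:kubelet-bootstrap"

func main() {
	// Prints the same flag the controller manager was given before the refactor.
	fmt.Println("--insecure-experimental-approve-all-kubelet-csrs-for-group=" + KubeletBootstrapGroup)
}
```
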
@@ -27,12 +27,19 @@ import (
 	"k8s.io/kubernetes/pkg/util/uuid"
 )
+
+const (
+	// TODO: prefix with kubeadm prefix
+	KubeletBootstrapUser  = "kubeadm-node-csr"
+	KubeletBootstrapGroup = "kubeadm:kubelet-bootstrap"
+)
+
 func CreateTokenAuthFile(bt string) error {
 	tokenAuthFilePath := path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, "tokens.csv")
 	if err := os.MkdirAll(kubeadmapi.GlobalEnvParams.HostPKIPath, 0700); err != nil {
 		return fmt.Errorf("failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.HostPKIPath, err)
 	}
-	serialized := []byte(fmt.Sprintf("%s,kubeadm-node-csr,%s,kubeadm:kubelet-bootstrap\n", bt, uuid.NewUUID()))
+	serialized := []byte(fmt.Sprintf("%s,%s,%s,%s\n", bt, KubeletBootstrapUser, uuid.NewUUID(), KubeletBootstrapGroup))
 	// DumpReaderToFile create a file with mode 0600
 	if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), tokenAuthFilePath); err != nil {
 		return fmt.Errorf("failed to save token auth file (%q) [%v]", tokenAuthFilePath, err)

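For reference, the `Sprintf` above emits one line per bootstrap token in the static token file layout (token, user, uid, group). The sketch below reproduces such a line; the token and UID values are made up, and only the format string and the two constants come from the hunk.

```go
package main

import "fmt"

// Constants copied from the const block added above.
const (
	KubeletBootstrapUser  = "kubeadm-node-csr"
	KubeletBootstrapGroup = "kubeadm:kubelet-bootstrap"
)

func main() {
	bt := "abcdef.0123456789abcdef"              // illustrative bootstrap token
	uid := "07401b3a-eeee-4a71-9b5a-55d1ee68f533" // illustrative UUID; the real one comes from uuid.NewUUID()
	fmt.Printf("%s,%s,%s,%s\n", bt, KubeletBootstrapUser, uid, KubeletBootstrapGroup)
	// Output: abcdef.0123456789abcdef,kubeadm-node-csr,07401b3a-eeee-4a71-9b5a-55d1ee68f533,kubeadm:kubelet-bootstrap
}
```
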
@@ -26,24 +26,8 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 )
-// CreateBootstrapRBACClusterRole creates the necessary ClusterRole for bootstrapping
+// CreateBootstrapRBACClusterRole grants the system:node-bootstrapper role to the group we created the bootstrap credential with
 func CreateBootstrapRBACClusterRole(clientset *clientset.Clientset) error {
-	clusterRole := rbac.ClusterRole{
-		ObjectMeta: metav1.ObjectMeta{Name: "kubeadm:kubelet-bootstrap"},
-		Rules: []rbac.PolicyRule{
-			rbac.NewRule("get").Groups("").Resources("nodes").RuleOrDie(),
-			rbac.NewRule("create", "watch").Groups("certificates.k8s.io").Resources("certificatesigningrequests").RuleOrDie(),
-		},
-	}
-	if _, err := clientset.Rbac().ClusterRoles().Create(&clusterRole); err != nil {
-		return err
-	}
-	subject := rbac.Subject{
-		Kind: "Group",
-		Name: "kubeadm:kubelet-bootstrap",
-	}
 	clusterRoleBinding := rbac.ClusterRoleBinding{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "kubeadm:kubelet-bootstrap",
@@ -51,14 +35,16 @@ func CreateBootstrapRBACClusterRole(clientset *clientset.Clientset) error {
 		RoleRef: rbac.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
-			Name:     "kubeadm:kubelet-bootstrap",
+			Name:     "system:node-bootstrapper",
 		},
-		Subjects: []rbac.Subject{subject},
+		Subjects: []rbac.Subject{
+			{Kind: "Group", Name: master.KubeletBootstrapGroup},
+		},
 	}
 	if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
 		return err
 	}
-	fmt.Println("[apiconfig] Created kubelet-bootstrap RBAC rules")
+	fmt.Println("[apiconfig] Created node bootstrapper RBAC rules")
 	return nil
 }
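The ClusterRole removed in the first hunk of this file was built with the `rbac.NewRule` helper. For readers who have not seen that builder, each call just fills in the verb, API group, and resource fields of a policy rule; the sketch below spells out the two removed rules using a local stand-in struct rather than the real `rbac.PolicyRule` type. Binding to the built-in `system:node-bootstrapper` role instead means kubeadm no longer carries its own copy of these bootstrapper permissions.

```go
package main

import "fmt"

// policyRule is a local stand-in for the rbac API's PolicyRule type.
type policyRule struct {
	Verbs     []string
	APIGroups []string
	Resources []string
}

func main() {
	// Roughly what rbac.NewRule("get").Groups("").Resources("nodes").RuleOrDie() produced.
	nodes := policyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"nodes"}}
	// Roughly what rbac.NewRule("create", "watch").Groups("certificates.k8s.io").Resources("certificatesigningrequests").RuleOrDie() produced.
	csrs := policyRule{
		Verbs:     []string{"create", "watch"},
		APIGroups: []string{"certificates.k8s.io"},
		Resources: []string{"certificatesigningrequests"},
	}
	fmt.Printf("%+v\n%+v\n", nodes, csrs)
}
```
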
@@ -90,7 +76,7 @@ func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
 		RoleRef: rbac.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
-			Name:     "kubeadm:" + master.KubeDNS,
+			Name:     clusterRole.Name,
 		},
 		Subjects: []rbac.Subject{subject},
 	}
@@ -102,32 +88,24 @@ func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
 	return nil
 }
-// CreateKubeProxyClusterRoleBinding creates the necessary ClusterRole for kube-dns
+// CreateKubeProxyClusterRoleBinding grants the system:node-proxier role to the nodes group,
+// since kubelet credentials are used to run the kube-proxy
+// TODO: give the kube-proxy its own credential and stop requiring this
 func CreateKubeProxyClusterRoleBinding(clientset *clientset.Clientset) error {
-	systemKubeProxySubject := rbac.Subject{
-		Kind:      "User",
-		Name:      "system:kube-proxy",
-		Namespace: api.NamespaceSystem,
-	}
-	systemNodesSubject := rbac.Subject{
-		Kind:      "Group",
-		Name:      "system:nodes",
-		Namespace: api.NamespaceSystem,
-	}
 	clusterRoleBinding := rbac.ClusterRoleBinding{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "system:node-proxier",
+			Name: "kubeadm:node-proxier",
 		},
 		RoleRef: rbac.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
 			Name:     "system:node-proxier",
 		},
-		Subjects: []rbac.Subject{systemKubeProxySubject, systemNodesSubject},
+		Subjects: []rbac.Subject{
+			{Kind: "Group", Name: "system:nodes"},
+		},
 	}
-	if _, err := clientset.Rbac().ClusterRoleBindings().Update(&clusterRoleBinding); err != nil {
+	if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
 		return err
 	}
 	fmt.Println("[apiconfig] Created kube-proxy RBAC rules")