Merge pull request #41835 from luxas/kubeadm_beta_label

Automatic merge from submit-queue (batch tested with PRs 41857, 41864, 40522, 41835, 41991)

kubeadm: Use a new label for marking and tainting the master node

**What this PR does / why we need it**:

Implements https://github.com/kubernetes/kubernetes/pull/39112 for kubeadm until that PR is merged. I want to proceed with this work; it has been pending for too long already.

It should be safe to apply this to kubeadm now: the implementation is still alpha in v1.6, and only the CLI will be beta.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
Switch to the `node-role.kubernetes.io/master` label for marking and tainting the master node in kubeadm
```
cc @deads2k @liggitt @smarterclayton @jbeda @roberthbailey @mikedanese @justinsb @sttts @kubernetes/api-approvers @kubernetes/api-reviewers
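
For orientation, a hedged sketch (not part of the diff) of what the change amounts to on the master `Node` object, using the client-go types vendored in this tree: kubeadm previously tainted the master with `dedicated=master:NoSchedule` and labelled it with the alpha role label, while after this PR a single key serves as both the node label (empty value) and the taint key. The snippet is illustrative only.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
)

// labelNodeRoleMaster mirrors the constant added in this PR.
const labelNodeRoleMaster = "node-role.kubernetes.io/master"

func main() {
	// Old scheme (removed below): a dedicated=master:NoSchedule taint plus an alpha role label.
	oldTaint := v1.Taint{Key: "dedicated", Value: "master", Effect: v1.TaintEffectNoSchedule}

	// New scheme: one key doubles as the node label (empty value) and the taint key.
	newLabel := map[string]string{labelNodeRoleMaster: ""}
	newTaint := v1.Taint{Key: labelNodeRoleMaster, Value: "", Effect: v1.TaintEffectNoSchedule}

	fmt.Println(oldTaint, newLabel, newTaint)
}
```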
Kubernetes Submit Queue 2017-02-26 11:13:57 -08:00 committed by GitHub
commit 861f4179bc
10 changed files with 116 additions and 39 deletions

View File

@@ -11,6 +11,7 @@ go_library(
name = "go_default_library",
srcs = ["constants.go"],
tags = ["automanaged"],
deps = ["//vendor:k8s.io/client-go/pkg/api/v1"],
)
filegroup(

View File

@@ -19,6 +19,8 @@ package constants
import (
"path"
"time"
"k8s.io/client-go/pkg/api/v1"
)
const (
@@ -69,6 +71,10 @@ const (
// DefaultTokenDuration specifies the default amount of time that a bootstrap token will be valid
DefaultTokenDuration = time.Duration(8) * time.Hour
// LabelNodeRoleMaster specifies that a node is a master
// It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
LabelNodeRoleMaster = "node-role.kubernetes.io/master"
// CSVTokenBootstrapUser is currently the user the bootstrap token in the .csv file
// TODO: This should change to something more official and supported
// TODO: Prefix with kubeadm prefix
@@ -80,6 +86,13 @@ const (
)
var (
// MasterToleration is the toleration to apply on the PodSpec for being able to run that Pod on the master
MasterToleration = v1.Toleration{
Key: LabelNodeRoleMaster,
Effect: v1.TaintEffectNoSchedule,
}
AuthorizationPolicyPath = path.Join(KubernetesDir, "abac_policy.json")
AuthorizationWebhookConfigPath = path.Join(KubernetesDir, "webhook_authz.conf")
)
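
As a usage note, a minimal sketch under the assumption that the taint kubeadm applies has an empty value (as in the `apiconfig` change further down): a toleration that leaves `Operator` unset defaults to `Equal` with an empty `Value`, so `MasterToleration` matches the `node-role.kubernetes.io/master:NoSchedule` taint exactly, and attaching it to a PodSpec is all a control-plane pod needs to be schedulable on the master.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

func main() {
	// A toleration with no Operator defaults to "Equal" with an empty Value, so
	// MasterToleration matches the node-role.kubernetes.io/master:NoSchedule taint
	// (whose value is also empty) that kubeadm places on the master node.
	spec := v1.PodSpec{
		Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration},
	}
	fmt.Printf("tolerates %s: %+v\n", kubeadmconstants.LabelNodeRoleMaster, spec.Tolerations)
}
```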

View File

@@ -107,9 +107,10 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli
}
func createDiscoveryDeployment(client *clientset.Clientset) error {
discoveryBytes, err := kubeadmutil.ParseTemplate(KubeDiscoveryDeployment, struct{ ImageRepository, Arch string }{
discoveryBytes, err := kubeadmutil.ParseTemplate(KubeDiscoveryDeployment, struct{ ImageRepository, Arch, MasterTaintKey string }{
ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
Arch: runtime.GOARCH,
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
})
if err != nil {
return fmt.Errorf("error when parsing kube-discovery template: %v", err)
@@ -119,6 +120,10 @@ func createDiscoveryDeployment(client *clientset.Clientset) error {
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), discoveryBytes, discoveryDeployment); err != nil {
return fmt.Errorf("unable to decode kube-discovery deployment %v", err)
}
// TODO: Set this in the yaml spec instead
discoveryDeployment.Spec.Template.Spec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
if _, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).Create(discoveryDeployment); err != nil {
return fmt.Errorf("unable to create a new discovery deployment: %v", err)
}
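
The pattern in this hunk — decode the rendered manifest, then set `Tolerations` in Go — recurs throughout the PR, because the `tolerations:` blocks in the YAML templates are commented out (see the TODOs below). A condensed, self-contained sketch of that workaround; the helper name and package placement are illustrative, not taken from the tree:

```go
package master

import (
	"fmt"

	kuberuntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/pkg/api/v1"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/pkg/api"
)

// decodeWithMasterToleration is a hypothetical helper illustrating the workaround:
// decode the rendered manifest, then attach the master toleration in code, since
// the decoder currently drops the new Tolerations field when set in YAML.
func decodeWithMasterToleration(manifestBytes []byte) (*extensions.Deployment, error) {
	deployment := &extensions.Deployment{}
	if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), manifestBytes, deployment); err != nil {
		return nil, fmt.Errorf("unable to decode deployment: %v", err)
	}
	deployment.Spec.Template.Spec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
	return deployment, nil
}
```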

View File

@@ -218,7 +218,7 @@ func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, vo
Resources: componentResources("250m"),
},
},
Tolerations: getMasterToleration(),
Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration},
},
},
},
@@ -269,7 +269,7 @@ func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes
Env: getProxyEnvVars(),
},
},
Tolerations: getMasterToleration(),
Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration},
DNSPolicy: v1.DNSDefault,
},
},
@@ -319,7 +319,7 @@ func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment
Env: getProxyEnvVars(),
},
},
Tolerations: getMasterToleration(),
Tolerations: []v1.Toleration{kubeadmconstants.MasterToleration},
},
},
},
@@ -330,15 +330,3 @@ func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment
func buildStaticManifestFilepath(name string) string {
return path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests", name+".yaml")
}
func getMasterToleration() []v1.Toleration {
// Tolerate the master taint we add to our master nodes, as this can and should
// run there.
// TODO: Duplicated above
return []v1.Toleration{{
Key: "dedicated",
Value: "master",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
}}
}

View File

@@ -75,10 +75,10 @@ spec:
name: clusterinfo
readOnly: true
hostNetwork: true
tolerations:
- key: "dedicated"
value: "master"
effect: "NoSchedule"
# TODO: Why doesn't the Decoder recognize this new field and decode it properly? Right now it's ignored
# tolerations:
# - key: {{ .MasterTaintKey }}
# effect: NoSchedule
securityContext:
seLinuxOptions:
type: spc_t

View File

@@ -17,6 +17,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -46,4 +47,5 @@ go_test(
srcs = ["addons_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = ["//cmd/kubeadm/app/util:go_default_library"],
)

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/client-go/pkg/api/v1"
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
@@ -43,19 +44,21 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ Image, ClusterCIDR string }{
Image: images.GetCoreImage("proxy", cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet),
proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ Image, ClusterCIDR, MasterTaintKey string }{
Image: images.GetCoreImage("proxy", cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet),
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct{ ImageRepository, Arch, Version, DNSDomain string }{
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct{ ImageRepository, Arch, Version, DNSDomain, MasterTaintKey string }{
ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
Arch: runtime.GOARCH,
Version: KubeDNSVersion,
DNSDomain: cfg.Networking.DNSDomain,
MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
})
if err != nil {
return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
@@ -101,6 +104,7 @@ func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clients
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil {
return fmt.Errorf("unable to decode kube-proxy daemonset %v", err)
}
kubeproxyDaemonSet.Spec.Template.Spec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Create(kubeproxyDaemonSet); err != nil {
return fmt.Errorf("unable to create a new kube-proxy daemonset: %v", err)
@@ -113,6 +117,13 @@ func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client *clientset.
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
return fmt.Errorf("unable to decode kube-dns deployment %v", err)
}
kubednsDeployment.Spec.Template.Spec.Tolerations = []v1.Toleration{
kubeadmconstants.MasterToleration,
{
Key: "CriticalAddonsOnly",
Operator: "Exists",
},
}
// TODO: All these .Create(foo) calls should instead be more like "kubectl apply -f" commands; they should not fail if there are existing objects with the same name
if _, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).Create(kubednsDeployment); err != nil {

View File

@@ -16,7 +16,11 @@ limitations under the License.
package addons
import "testing"
import (
"testing"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
func TestGetClusterCIDR(t *testing.T) {
emptyClusterCIDR := getClusterCIDR("")
@@ -29,3 +33,56 @@ func TestGetClusterCIDR(t *testing.T) {
t.Errorf("Invalid format: %s", clusterCIDR)
}
}
func TestCompileManifests(t *testing.T) {
var tests = []struct {
manifest string
data interface{}
expected bool
}{
{
manifest: KubeProxyConfigMap,
data: struct{ MasterEndpoint string }{
MasterEndpoint: "foo",
},
expected: true,
},
{
manifest: KubeProxyDaemonSet,
data: struct{ Image, ClusterCIDR, MasterTaintKey string }{
Image: "foo",
ClusterCIDR: "foo",
MasterTaintKey: "foo",
},
expected: true,
},
{
manifest: KubeDNSDeployment,
data: struct{ ImageRepository, Arch, Version, DNSDomain, MasterTaintKey string }{
ImageRepository: "foo",
Arch: "foo",
Version: "foo",
DNSDomain: "foo",
MasterTaintKey: "foo",
},
expected: true,
},
{
manifest: KubeDNSService,
data: struct{ DNSIP string }{
DNSIP: "foo",
},
expected: true,
},
}
for _, rt := range tests {
_, actual := kubeadmutil.ParseTemplate(rt.manifest, rt.data)
if (actual == nil) != rt.expected {
t.Errorf(
"failed CompileManifests:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
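
For context on what these tests exercise: `kubeadmutil.ParseTemplate` is presumably a thin wrapper around `text/template`, rendering the manifest string against the supplied data and failing when the template references a field the data struct lacks — which is what the `expected` flags above encode. A minimal sketch of such a helper, under that assumption; it is a reconstruction, not the exact implementation in `cmd/kubeadm/app/util`:

```go
package util

import (
	"bytes"
	"fmt"
	"text/template"
)

// ParseTemplate renders strtmpl with obj and returns the resulting bytes, or an
// error if parsing or execution fails (e.g. a field referenced in the template
// does not exist on obj).
func ParseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	var buf bytes.Buffer
	tmpl, err := template.New("template").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %v", err)
	}
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %v", err)
	}
	return buf.Bytes(), nil
}
```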

View File

@@ -79,10 +79,10 @@ spec:
name: kube-proxy
hostNetwork: true
serviceAccountName: kube-proxy
tolerations:
- key: dedicated
value: master
effect: NoSchedule
# TODO: Why doesn't the Decoder recognize this new field and decode it properly? Right now it's ignored
# tolerations:
# - key: {{ .MasterTaintKey }}
# effect: NoSchedule
volumes:
- name: kube-proxy
configMap:
@@ -235,12 +235,12 @@ spec:
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "dedicated"
value: "master"
effect: "NoSchedule"
# TODO: Why doesn't the Decoder recognize this new field and decode it properly? Right now it's ignored
# tolerations:
# - key: CriticalAddonsOnly
# operator: Exists
# - key: {{ .MasterTaintKey }}
# effect: NoSchedule
# TODO: Remove this affinity field as soon as we are using manifest lists
affinity:
nodeAffinity:

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
const apiCallRetryInterval = 500 * time.Millisecond
@@ -58,10 +59,9 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error
return err
}
// TODO: Switch to the new master label defined in https://github.com/kubernetes/kubernetes/pull/39112
n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
n.Spec.Taints = []v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}
// The master node is tainted and labelled accordingly
n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
n.Spec.Taints = []v1.Taint{{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}}
newData, err := json.Marshal(n)
if err != nil {
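
The hunk above is truncated. For completeness, a hedged reconstruction of the surrounding flow, suggested by the `strategicpatch` and `json` usage in this file: the node is marshalled before and after the mutation and the difference is applied as a strategic merge patch, so unrelated fields on the Node are left untouched. Function and accessor names here are illustrative, not copied from the tree.

```go
package apiconfig

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// patchMasterNode labels and taints one node with the master role key and pushes
// the change as a strategic merge patch (illustrative sketch, not the exact code).
func patchMasterNode(client *clientset.Clientset, n *v1.Node) error {
	oldData, err := json.Marshal(n)
	if err != nil {
		return err
	}

	if n.ObjectMeta.Labels == nil {
		n.ObjectMeta.Labels = map[string]string{}
	}
	n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
	n.Spec.Taints = []v1.Taint{{Key: kubeadmconstants.LabelNodeRoleMaster, Value: "", Effect: "NoSchedule"}}

	newData, err := json.Marshal(n)
	if err != nil {
		return err
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return fmt.Errorf("failed to create two-way merge patch: %v", err)
	}

	if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
		return fmt.Errorf("error patching node %q: %v", n.Name, err)
	}
	return nil
}
```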