Add a new, dedicated package for the addons and use templates instead of native Go types, and remove the previous Go files

Lucas Käldström 2017-02-01 21:04:14 +02:00
parent 8f660dc24e
commit 183f71d57a
GPG Key ID: 3FA3783D77751514
7 changed files with 472 additions and 448 deletions

View File

@@ -29,6 +29,7 @@ go_library(
"//cmd/kubeadm/app/discovery:go_default_library",
"//cmd/kubeadm/app/master:go_default_library",
"//cmd/kubeadm/app/node:go_default_library",
"//cmd/kubeadm/app/phases/addons:go_default_library",
"//cmd/kubeadm/app/phases/apiconfig:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",

View File

@@ -33,6 +33,7 @@ import (
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
addonsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons"
apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
@@ -259,7 +260,7 @@ func (i *Init) Run(out io.Writer) error {
}
// PHASE 5: Deploy essential addons
if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
if err := addonsphase.CreateEssentialAddons(i.cfg, client); err != nil {
return err
}

View File

@@ -1,332 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"fmt"
"net"
"path"
"runtime"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
const KubeDNS = "kube-dns"
func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
privilegedTrue := true
return v1.PodSpec{
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
Containers: []v1.Container{{
Name: kubeProxy,
Image: images.GetCoreImage(images.KubeProxyImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: append(getProxyCommand(cfg), "--kubeconfig=/run/kubeconfig"),
SecurityContext: &v1.SecurityContext{Privileged: &privilegedTrue},
VolumeMounts: []v1.VolumeMount{
{
Name: "dbus",
MountPath: "/var/run/dbus",
ReadOnly: false,
},
{
// TODO there are handful of clever options to get around this, but it's
// easier to just mount kubelet's config here; we should probably just
// make sure that proxy reads the token and CA cert from /run/secrets
// and accepts `--master` at the same time
//
// clever options include:
// - do CSR dance and create kubeconfig and mount it as a secret
// - create a service account with a second secret encoding kubeconfig
// - use init container to convert known information to kubeconfig
// - ...whatever
Name: "kubeconfig",
MountPath: "/run/kubeconfig",
ReadOnly: false,
},
},
}},
Volumes: []v1.Volume{
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.KubeletKubeConfigFileName)},
},
},
{
Name: "dbus",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/var/run/dbus"},
},
},
},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "beta.kubernetes.io/arch",
Operator: v1.NodeSelectorOpIn,
Values: []string{runtime.GOARCH},
},
},
},
},
},
},
},
}
}
func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
kubeDNSPort := int32(10053)
dnsmasqPort := int32(53)
return v1.PodSpec{
ServiceAccountName: KubeDNS,
Containers: []v1.Container{
// DNS server
{
Name: "kubedns",
Image: images.GetAddonImage(images.KubeDNSImage),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("170Mi"),
},
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("70Mi"),
},
},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthcheck/kubedns",
Port: intstr.FromInt(10054),
Scheme: v1.URISchemeHTTP,
},
},
InitialDelaySeconds: 60,
TimeoutSeconds: 5,
SuccessThreshold: 1,
FailureThreshold: 5,
},
// # we poll on pod startup for the Kubernetes master service and
// # only setup the /readiness HTTP server once that's available.
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/readiness",
Port: intstr.FromInt(8081),
Scheme: v1.URISchemeHTTP,
},
},
InitialDelaySeconds: 3,
TimeoutSeconds: 5,
},
Args: []string{
fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
fmt.Sprintf("--dns-port=%d", kubeDNSPort),
"--config-map=kube-dns",
"--v=2",
},
Env: []v1.EnvVar{
{
Name: "PROMETHEUS_PORT",
Value: "10055",
},
},
Ports: []v1.ContainerPort{
{
ContainerPort: kubeDNSPort,
Name: "dns-local",
Protocol: v1.ProtocolUDP,
},
{
ContainerPort: kubeDNSPort,
Name: "dns-tcp-local",
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 10055,
Name: "metrics",
Protocol: v1.ProtocolTCP,
},
},
},
// dnsmasq
{
Name: "dnsmasq",
Image: images.GetAddonImage(images.KubeDNSmasqImage),
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthcheck/dnsmasq",
Port: intstr.FromInt(10054),
Scheme: v1.URISchemeHTTP,
},
},
InitialDelaySeconds: 60,
TimeoutSeconds: 5,
SuccessThreshold: 1,
FailureThreshold: 5,
},
Args: []string{
"--cache-size=1000",
"--no-resolv",
fmt.Sprintf("--server=127.0.0.1#%d", kubeDNSPort),
"--log-facility=-",
},
Ports: []v1.ContainerPort{
{
ContainerPort: dnsmasqPort,
Name: "dns",
Protocol: v1.ProtocolUDP,
},
{
ContainerPort: dnsmasqPort,
Name: "dns-tcp",
Protocol: v1.ProtocolTCP,
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("150m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10Mi"),
},
},
},
{
Name: "sidecar",
Image: images.GetAddonImage(images.KubeDNSSidecarImage),
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/metrics",
Port: intstr.FromInt(10054),
Scheme: v1.URISchemeHTTP,
},
},
InitialDelaySeconds: 60,
TimeoutSeconds: 5,
SuccessThreshold: 1,
FailureThreshold: 5,
},
Args: []string{
"--v=2",
"--logtostderr",
fmt.Sprintf("--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.%s,5,A", cfg.Networking.DNSDomain),
fmt.Sprintf("--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.%s,5,A", cfg.Networking.DNSDomain),
},
Ports: []v1.ContainerPort{
{
ContainerPort: 10054,
Name: "metrics",
Protocol: v1.ProtocolTCP,
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("20Mi"),
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"),
},
},
},
},
DNSPolicy: v1.DNSDefault,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "beta.kubernetes.io/arch",
Operator: v1.NodeSelectorOpIn,
Values: []string{runtime.GOARCH},
},
},
},
},
},
},
},
}
}
func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*v1.ServiceSpec, error) {
_, n, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, fmt.Errorf("could not parse %q: %v", cfg.Networking.ServiceSubnet, err)
}
ip, err := ipallocator.GetIndexedIP(n, 10)
if err != nil {
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR %q: [%v]", cfg.Networking.ServiceSubnet, err)
}
return &v1.ServiceSpec{
Selector: map[string]string{"name": KubeDNS},
Ports: []v1.ServicePort{
{Name: "dns", Port: 53, Protocol: v1.ProtocolUDP},
{Name: "dns-tcp", Port: 53, Protocol: v1.ProtocolTCP},
},
ClusterIP: ip.String(),
}, nil
}
func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
kubeProxyDaemonSet := NewDaemonSet(kubeProxy, createKubeProxyPodSpec(cfg))
SetMasterTaintTolerations(&kubeProxyDaemonSet.Spec.Template.ObjectMeta)
if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil {
return fmt.Errorf("failed creating essential kube-proxy addon [%v]", err)
}
fmt.Println("[addons] Created essential addon: kube-proxy")
kubeDNSDeployment := NewDeployment(KubeDNS, 1, createKubeDNSPodSpec(cfg))
SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}
kubeDNSServiceSpec, err := createKubeDNSServiceSpec(cfg)
if err != nil {
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}
kubeDNSService := NewService(KubeDNS, *kubeDNSServiceSpec)
kubeDNSService.ObjectMeta.Labels["kubernetes.io/name"] = "KubeDNS"
if _, err := client.Services(metav1.NamespaceSystem).Create(kubeDNSService); err != nil {
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}
fmt.Println("[addons] Created essential addon: kube-dns")
return nil
}

View File

@@ -1,115 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"testing"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
func TestCreateKubeProxyPodSpec(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected bool
}{
{
cfg: &kubeadmapi.MasterConfiguration{},
expected: true,
},
}
for _, rt := range tests {
actual := createKubeProxyPodSpec(rt.cfg)
if (actual.Containers[0].Name != "") != rt.expected {
t.Errorf(
"failed createKubeProxyPodSpec:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual.Containers[0].Name != ""),
)
}
}
}
func TestCreateKubeDNSPodSpec(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected string
}{
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadm.Networking{DNSDomain: "localhost"},
},
expected: "--domain=localhost",
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadm.Networking{DNSDomain: "foo"},
},
expected: "--domain=foo",
},
}
for _, rt := range tests {
actual := createKubeDNSPodSpec(rt.cfg)
if actual.Containers[0].Args[0] != rt.expected {
t.Errorf(
"failed createKubeDNSPodSpec:\n\texpected: %s\n\t actual: %s",
rt.expected,
actual.Containers[0].Args[0],
)
}
}
}
func TestCreateKubeDNSServiceSpec(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.MasterConfiguration
expected bool
}{
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadm.Networking{ServiceSubnet: "foo"},
},
expected: false,
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadm.Networking{ServiceSubnet: "10.0.0.1/1"},
},
expected: false,
},
{
cfg: &kubeadmapi.MasterConfiguration{
Networking: kubeadm.Networking{ServiceSubnet: "10.0.0.1/24"},
},
expected: true,
},
}
for _, rt := range tests {
_, actual := createKubeDNSServiceSpec(rt.cfg)
if (actual == nil) != rt.expected {
t.Errorf(
"failed createKubeDNSServiceSpec:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}

View File

@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"addons.go",
"manifests.go",
],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,155 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addons
import (
"fmt"
"net"
"runtime"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kuberuntime "k8s.io/apimachinery/pkg/runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// CreateEssentialAddons creates the kube-proxy and kube-dns addons
func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
proxyConfigMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap, struct{ MasterEndpoint string }{
// Fetch this value from the kubeconfig file
MasterEndpoint: fmt.Sprintf("https://%s:%d", cfg.API.AdvertiseAddresses[0], cfg.API.Port),
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err)
}
proxyDaemonSetBytes, err := kubeadmutil.ParseTemplate(KubeProxyDaemonSet, struct{ ImageRepository, Arch, Version string }{
ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
Arch: runtime.GOARCH,
// TODO: Fetch the version from the {API Server IP}/version
Version: cfg.KubernetesVersion,
})
if err != nil {
return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
}
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct {
ImageRepository, Arch, Version, DNSDomain string
Replicas int
}{
ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
Arch: runtime.GOARCH,
// TODO: Support larger amount of replicas?
Replicas: 1,
Version: KubeDNSVersion,
DNSDomain: cfg.Networking.DNSDomain,
})
if err != nil {
return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
}
// Get the DNS IP
dnsip, err := getDNSIP(cfg.Networking.ServiceSubnet)
if err != nil {
return err
}
dnsServiceBytes, err := kubeadmutil.ParseTemplate(KubeDNSService, struct{ DNSIP string }{
DNSIP: dnsip.String(),
})
if err != nil {
return fmt.Errorf("error when parsing kube-dns service template: %v", err)
}
err = CreateKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client)
if err != nil {
return err
}
fmt.Println("[addons] Created essential addon: kube-proxy")
err = CreateKubeDNSAddon(dnsDeploymentBytes, dnsServiceBytes, client)
if err != nil {
return err
}
fmt.Println("[addons] Created essential addon: kube-dns")
return nil
}
func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clientset.Clientset) error {
kubeproxyConfigMap := &v1.ConfigMap{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), configMapBytes, kubeproxyConfigMap); err != nil {
return fmt.Errorf("unable to decode kube-proxy configmap %v", err)
}
if _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(kubeproxyConfigMap); err != nil {
return fmt.Errorf("unable to create a new kube-proxy configmap: %v", err)
}
kubeproxyDaemonSet := &extensions.DaemonSet{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil {
return fmt.Errorf("unable to decode kube-proxy daemonset %v", err)
}
if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Create(kubeproxyDaemonSet); err != nil {
return fmt.Errorf("unable to create a new kube-proxy daemonset: %v", err)
}
return nil
}
func CreateKubeDNSAddon(deploymentBytes, serviceBytes []byte, client *clientset.Clientset) error {
kubednsDeployment := &extensions.Deployment{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
return fmt.Errorf("unable to decode kube-dns deployment %v", err)
}
// TODO: All these .Create(foo) calls should instead be more like "kubectl apply -f" commands; they should not fail if there are existing objects with the same name
if _, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).Create(kubednsDeployment); err != nil {
return fmt.Errorf("unable to create a new kube-dns deployment: %v", err)
}
kubednsService := &v1.Service{}
if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), serviceBytes, kubednsService); err != nil {
return fmt.Errorf("unable to decode kube-dns service %v", err)
}
if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(kubednsService); err != nil {
return fmt.Errorf("unable to create a new kube-dns service: %v", err)
}
return nil
}
// TODO: Instead of looking at the subnet given to kubeadm, it should be possible to only use /28 or larger subnets and then
// kubeadm should look at the kubernetes service (e.g. 10.96.0.1 or 10.0.0.1) and just append a "0" at the end.
// This way, we don't need the information about the subnet in this phase => good
func getDNSIP(subnet string) (net.IP, error) {
_, n, err := net.ParseCIDR(subnet)
if err != nil {
return nil, fmt.Errorf("could not parse %q: %v", subnet, err)
}
ip, err := ipallocator.GetIndexedIP(n, 10)
if err != nil {
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR %q: [%v]", subnet, err)
}
return ip, nil
}
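For the default kubeadm service subnet 10.96.0.0/12, GetIndexedIP(n, 10) yields 10.96.0.10, the conventional kube-dns ClusterIP. Below is a minimal in-package test sketch of getDNSIP; it is not part of this commit and the case values are illustrative:

package addons

import "testing"

// TestGetDNSIP is an illustrative sketch only (not in this commit): it checks
// that getDNSIP picks the tenth address of the service subnet and rejects
// input that is not a CIDR.
func TestGetDNSIP(t *testing.T) {
	var tests = []struct {
		subnet   string
		expected string
		valid    bool
	}{
		{"10.96.0.0/12", "10.96.0.10", true}, // kubeadm default service subnet
		{"10.0.0.1/24", "10.0.0.10", true},
		{"foo", "", false}, // not a CIDR, must return an error
	}
	for _, rt := range tests {
		ip, err := getDNSIP(rt.subnet)
		if rt.valid != (err == nil) {
			t.Errorf("getDNSIP(%q): unexpected error result: %v", rt.subnet, err)
			continue
		}
		if rt.valid && ip.String() != rt.expected {
			t.Errorf("getDNSIP(%q) = %s, expected %s", rt.subnet, ip, rt.expected)
		}
	}
}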

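The rendering helper kubeadmutil.ParseTemplate is not shown in this diff; from the call sites above it presumably wraps text/template and returns the rendered bytes for the runtime decoder. A minimal sketch under that assumption follows; the body and error wording are illustrative, not the actual util package code:

package util

import (
	"bytes"
	"fmt"
	"text/template"
)

// ParseTemplate is a hedged sketch (not this commit's code): it renders
// strtmpl with obj as the template data and returns the resulting bytes.
func ParseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	tmpl, err := template.New("template").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %v", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %v", err)
	}
	return buf.Bytes(), nil
}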
View File

@@ -0,0 +1,273 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addons
const (
KubeProxyConfigMap = `
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-proxy
namespace: kube-system
labels:
app: kube-proxy
data:
kubeconfig.conf: |
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: {{ .MasterEndpoint }}
name: default
contexts:
- context:
cluster: default
namespace: default
user: default
name: default
current-context: default
users:
- name: default
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`
KubeProxyDaemonSet = `
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-proxy
kubernetes.io/cluster-service: "true"
name: kube-proxy
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-proxy
template:
metadata:
labels:
k8s-app: kube-proxy
kubernetes.io/cluster-service: "true"
annotations:
# TODO: Move this to the beta tolerations field below as soon as the Tolerations field exists in PodSpec
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","value":"master","effect":"NoSchedule"}]'
spec:
containers:
- name: kube-proxy
image: {{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
command:
- kube-proxy
- --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kube-proxy
name: kube-proxy
hostNetwork: true
serviceAccountName: kube-proxy
# Tolerate running on the master
# tolerations:
# - key: dedicated
# value: master
# effect: NoSchedule
volumes:
- name: kube-proxy
configMap:
name: kube-proxy
`
KubeDNSVersion = "1.11.0"
KubeDNSDeployment = `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
name: kube-dns
namespace: kube-system
spec:
replicas: {{ .Replicas }}
selector:
matchLabels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
annotations:
# TODO: Move this to the beta tolerations field below as soon as the Tolerations field exists in PodSpec
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","value":"master","effect":"NoSchedule"}]'
spec:
containers:
- name: kubedns
image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
args:
- --domain={{ .DNSDomain }}
- --dns-port=10053
- --config-map=kube-dns
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 3
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
- name: dnsmasq
image: {{ .ImageRepository }}/k8s-dns-dnsmasq-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
args:
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
- --log-facility=-
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
requests:
cpu: 150m
memory: 10Mi
- name: sidecar
image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }}
imagePullPolicy: IfNotPresent
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ .DNSDomain }},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ .DNSDomain }},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
requests:
cpu: 10m
memory: 20Mi
dnsPolicy: Default
serviceAccountName: kube-dns
# tolerations:
# - key: dedicated
# value: master
# effect: NoSchedule
# TODO: Remove this affinity field as soon as we are using manifest lists
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/arch
operator: In
values:
- {{ .Arch }}
`
KubeDNSService = `
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
name: kube-dns
namespace: kube-system
spec:
clusterIP: {{ .DNSIP }}
ports:
- name: dns
port: 53
protocol: UDP
targetPort: 53
- name: dns-tcp
port: 53
protocol: TCP
targetPort: 53
selector:
k8s-app: kube-dns
`
)
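With the native Go specs and their unit tests removed above, the templates can be exercised in a similar way by rendering them and decoding the result with the same codec addons.go uses. A rough in-package sketch, not part of this commit; the repository, arch, and field checks are illustrative:

package addons

import (
	"testing"

	kuberuntime "k8s.io/apimachinery/pkg/runtime"
	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
	"k8s.io/kubernetes/pkg/api"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

// TestKubeDNSDeploymentTemplate is an illustrative sketch only (not in this
// commit): it renders the KubeDNSDeployment template and verifies the result
// decodes into a typed Deployment with the three expected containers.
func TestKubeDNSDeploymentTemplate(t *testing.T) {
	data := struct {
		ImageRepository, Arch, Version, DNSDomain string
		Replicas                                  int
	}{"gcr.io/google_containers", "amd64", KubeDNSVersion, "cluster.local", 1}

	b, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, data)
	if err != nil {
		t.Fatalf("rendering KubeDNSDeployment failed: %v", err)
	}

	deployment := &extensions.Deployment{}
	if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), b, deployment); err != nil {
		t.Fatalf("decoding KubeDNSDeployment failed: %v", err)
	}
	if n := len(deployment.Spec.Template.Spec.Containers); n != 3 {
		t.Errorf("expected 3 containers (kubedns, dnsmasq, sidecar), got %d", n)
	}
}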