Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 20:53:33 +00:00
Merge pull request #29948 from mbruzek/juju-kubedns-update

Automatic merge from submit-queue

Replacing skydns with kubedns for the juju cluster. #29720

```release-note
* Updating the cluster/juju provider to use kubedns in place of skydns.
```

commit 33239c1e6f
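The launch handler in this change probes the cluster with kubectl and only creates the objects that are missing, so it is safe to re-run on every hook invocation. A minimal standalone sketch of that check-then-create pattern, mirroring launch_dns() below (the helper name is illustrative, not part of this commit):

```python
from shlex import split
from subprocess import call, check_call


def ensure_created(manifest):
    '''Illustrative helper (not part of the commit): create the objects in
    `manifest` only when kubectl cannot find them.'''
    # `kubectl get -f` returns non-zero when the objects do not exist yet.
    if call(split('kubectl get -f {}'.format(manifest))) != 0:
        # `kubectl create -f` fails loudly (via check_call) on a real error.
        check_call(split('kubectl create -f {}'.format(manifest)))


# Illustrative usage, matching the manifests rendered by the charm:
ensure_created('files/manifests/kubedns-rc.yaml')
ensure_created('files/manifests/kubedns-svc.yaml')
```

The diff of the charm's reactive handlers and templates follows.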
@@ -131,36 +131,47 @@ def ca():


 @when('kubelet.available', 'leadership.is_leader')
-@when_not('skydns.available')
-def launch_skydns():
-    '''Create the "kube-system" namespace, the skydns resource controller, and
-    the skydns service. '''
-    hookenv.log('Creating kubernetes skydns on the master node.')
+@when_not('kubedns.available', 'skydns.available')
+def launch_dns():
+    '''Create the "kube-system" namespace, the kubedns resource controller,
+    and the kubedns service. '''
+    hookenv.log('Creating kubernetes kubedns on the master node.')
     # Only launch and track this state on the leader.
-    # Launching duplicate SkyDNS rc will raise an error
+    # Launching duplicate kubeDNS rc will raise an error
     # Run a command to check if the apiserver is responding.
     return_code = call(split('kubectl cluster-info'))
     if return_code != 0:
         hookenv.log('kubectl command failed, waiting for apiserver to start.')
-        remove_state('skydns.available')
-        # Return without setting skydns.available so this method will retry.
+        remove_state('kubedns.available')
+        # Return without setting kubedns.available so this method will retry.
         return
     # Check for the "kube-system" namespace.
     return_code = call(split('kubectl get namespace kube-system'))
     if return_code != 0:
-        # Create the kube-system namespace that is used by the skydns files.
+        # Create the kube-system namespace that is used by the kubedns files.
         check_call(split('kubectl create namespace kube-system'))
-    # Check for the skydns replication controller.
-    return_code = call(split('kubectl get -f files/manifests/skydns-rc.yml'))
+    # Check for the kubedns replication controller.
+    return_code = call(split('kubectl get -f files/manifests/kubedns-rc.yaml'))
     if return_code != 0:
-        # Create the skydns replication controller from the rendered file.
-        check_call(split('kubectl create -f files/manifests/skydns-rc.yml'))
-    # Check for the skydns service.
-    return_code = call(split('kubectl get -f files/manifests/skydns-svc.yml'))
+        # Create the kubedns replication controller from the rendered file.
+        check_call(split('kubectl create -f files/manifests/kubedns-rc.yaml'))
+    # Check for the kubedns service.
+    return_code = call(split('kubectl get -f files/manifests/kubedns-svc.yaml'))
     if return_code != 0:
-        # Create the skydns service from the rendered file.
-        check_call(split('kubectl create -f files/manifests/skydns-svc.yml'))
-    set_state('skydns.available')
+        # Create the kubedns service from the rendered file.
+        check_call(split('kubectl create -f files/manifests/kubedns-svc.yaml'))
+    set_state('kubedns.available')


+@when('skydns.available', 'leadership.is_leader')
+def convert_to_kubedns():
+    '''Delete the skydns containers to make way for the kubedns containers.'''
+    hookenv.log('Deleteing the old skydns deployment.')
+    # Delete the skydns replication controller.
+    return_code = call(split('kubectl delete rc kube-dns-v11'))
+    # Delete the skydns service.
+    return_code = call(split('kubectl delete svc kube-dns'))
+    remove_state('skydns.available')
+
+
 @when('docker.available')
@@ -297,11 +308,11 @@ def gather_sdn_data():
     else:
         # There is no SDN cider fall back to the kubernetes config cidr option.
         pillar['dns_server'] = get_dns_ip(hookenv.config().get('cidr'))
-    # The pillar['dns_server'] value is used the skydns-svc.yml file.
+    # The pillar['dns_server'] value is used the kubedns-svc.yaml file.
     pillar['dns_replicas'] = 1
-    # The pillar['dns_domain'] value is ued in the skydns-rc.yml
+    # The pillar['dns_domain'] value is used in the kubedns-rc.yaml
    pillar['dns_domain'] = hookenv.config().get('dns_domain')
-    # Use a 'pillar' dictionary so we can reuse the upstream skydns templates.
+    # Use a 'pillar' dictionary so we can reuse the upstream kubedns templates.
     sdn_data['pillar'] = pillar
     return sdn_data

@@ -400,14 +411,14 @@ def render_files(reldata=None):
     # Render the files/manifests/master.json that contains parameters for
     # the apiserver, controller, and controller-manager
     render('master.json', target, context)
-    # Source: ...master/cluster/addons/dns/skydns-svc.yaml.in
-    target = os.path.join(rendered_manifest_dir, 'skydns-svc.yml')
-    # Render files/kubernetes/skydns-svc.yaml for SkyDNS service.
-    render('skydns-svc.yml', target, context)
-    # Source: ...master/cluster/addons/dns/skydns-rc.yaml.in
-    target = os.path.join(rendered_manifest_dir, 'skydns-rc.yml')
-    # Render files/kubernetes/skydns-rc.yaml for SkyDNS pod.
-    render('skydns-rc.yml', target, context)
+    # Source: ...cluster/addons/dns/skydns-svc.yaml.in
+    target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
+    # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
+    render('kubedns-svc.yaml', target, context)
+    # Source: ...cluster/addons/dns/skydns-rc.yaml.in
+    target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
+    # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
+    render('kubedns-rc.yaml', target, context)


 def status_set(level, message):
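gather_sdn_data() and render_files() cooperate: the first builds a 'pillar' dictionary so the upstream DNS templates can be reused unchanged, and the second renders them into files/manifests/. A rough sketch of that flow, assuming charmhelpers' Jinja2-based render() and purely illustrative values:

```python
import os

from charmhelpers.core.templating import render

# Illustrative context values; the charm derives these from config and SDN data.
context = {
    'arch': 'amd64',
    'private_address': '10.0.0.10',
    'pillar': {
        'dns_replicas': 1,
        'dns_domain': 'cluster.local',
        'dns_server': '10.1.0.10',
        'federations_domain_map': '',
    },
}

rendered_manifest_dir = 'files/manifests'  # target directory used in the diff
for name in ('kubedns-rc.yaml', 'kubedns-svc.yaml'):
    target = os.path.join(rendered_manifest_dir, name)
    # render() loads the source template from the charm's templates/ directory.
    render(name, target, context)
```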
cluster/juju/layers/kubernetes/templates/kubedns-rc.yaml (new file, 115 lines)
@@ -0,0 +1,115 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file should be kept in sync with cluster/images/hyperkube/dns-rc.yaml
+
+# Warning: This is a file generated from the base underscore template file: skydns-rc.yaml.base
+
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: kube-dns-v18
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    version: v18
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: {{ pillar['dns_replicas'] }}
+  selector:
+    k8s-app: kube-dns
+    version: v18
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+        version: v18
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+      - name: kubedns
+        image: gcr.io/google_containers/kubedns-{{ arch }}:1.6
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            cpu: 100m
+            memory: 200Mi
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        args:
+        # command = "/kube-dns"
+        - --domain={{ pillar['dns_domain'] }}.
+        - --dns-port=10053
+        - --kube_master_url=http://{{ private_address }}:8080
+        {{ pillar['federations_domain_map'] }}
+        ports:
+        - containerPort: 10053
+          name: dns-local
+          protocol: UDP
+        - containerPort: 10053
+          name: dns-tcp-local
+          protocol: TCP
+      - name: dnsmasq
+        image: gcr.io/google_containers/kube-dnsmasq-{{ arch }}:1.3
+        args:
+        - --cache-size=1000
+        - --no-resolv
+        - --server=127.0.0.1#10053
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+      - name: healthz
+        image: gcr.io/google_containers/exechealthz-{{ arch }}:1.0
+        resources:
+          # keep request = limit to keep this container in guaranteed class
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+        args:
+        - -cmd=nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} 127.0.0.1:10053 >/dev/null
+        - -port=8080
+        - -quiet
+        ports:
+        - containerPort: 8080
+          protocol: TCP
+      dnsPolicy: Default # Don't use cluster DNS.
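The {{ ... }} placeholders in this template are Jinja2 expressions that charmhelpers' render() fills from the context built by the charm. A tiny standalone illustration of how two of these lines resolve (the substituted values are made up):

```python
from jinja2 import Template

# The image tag is parameterized on the architecture.
line = "image: gcr.io/google_containers/kubedns-{{ arch }}:1.6"
print(Template(line).render(arch='amd64'))
# -> image: gcr.io/google_containers/kubedns-amd64:1.6

# Replica count comes from the 'pillar' dictionary built in gather_sdn_data().
replicas = "replicas: {{ pillar['dns_replicas'] }}"
print(Template(replicas).render(pillar={'dns_replicas': 1}))
# -> replicas: 1
```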
cluster/juju/layers/kubernetes/templates/kubedns-svc.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file should be kept in sync with cluster/images/hyperkube/dns-svc.yaml
+
+# Warning: This is a file generated from the base underscore template file: skydns-svc.yaml.base
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "KubeDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP: {{ pillar['dns_server'] }}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
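Once these manifests have been applied, the charm's own `kubectl get -f` checks pass, and after convert_to_kubedns() runs the old skydns objects are gone. A hedged, illustrative way to spot-check the result by hand from the master node (object names come from the manifests in this commit):

```python
from shlex import split
from subprocess import call

# Each check returns 0 when the object exists.
checks = [
    'kubectl get rc kube-dns-v18 --namespace=kube-system',  # new kubedns rc
    'kubectl get svc kube-dns --namespace=kube-system',     # cluster DNS service
]
for cmd in checks:
    print(cmd, '->', 'ok' if call(split(cmd)) == 0 else 'missing')

# The old skydns rc should no longer exist after the conversion handler runs.
old_gone = call(split('kubectl get rc kube-dns-v11 --namespace=kube-system')) != 0
print('kube-dns-v11 removed:', old_gone)
```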
cluster/juju/layers/kubernetes/templates/skydns-rc.yml (deleted, 130 lines)
@@ -1,130 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-dns-v11
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    version: v11
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: {{ pillar['dns_replicas'] }}
-  selector:
-    k8s-app: kube-dns
-    version: v11
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns
-        version: v11
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: etcd
-        image: gcr.io/google_containers/etcd-{{ arch }}:2.2.1
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            cpu: 100m
-            memory: 500Mi
-          requests:
-            cpu: 100m
-            memory: 50Mi
-        command:
-        - /usr/local/bin/etcd
-        - -data-dir
-        - /var/etcd/data
-        - -listen-client-urls
-        - http://127.0.0.1:2379,http://127.0.0.1:4001
-        - -advertise-client-urls
-        - http://127.0.0.1:2379,http://127.0.0.1:4001
-        - -initial-cluster-token
-        - skydns-etcd
-        volumeMounts:
-        - name: etcd-storage
-          mountPath: /var/etcd/data
-      - name: kube2sky
-        image: gcr.io/google_containers/kube2sky:1.14
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            cpu: 100m
-            # Kube2sky watches all pods.
-            memory: 200Mi
-          requests:
-            cpu: 100m
-            memory: 50Mi
-        livenessProbe:
-          httpGet:
-            path: /healthz
-            port: 8080
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        readinessProbe:
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          # we poll on pod startup for the Kubernetes master service and
-          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 30
-          timeoutSeconds: 5
-        args:
-        # command = "/kube2sky"
-        - --domain={{ pillar['dns_domain'] }}
-        - --kube-master-url=http://{{ private_address }}:8080
-      - name: skydns
-        image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            cpu: 100m
-            memory: 200Mi
-          requests:
-            cpu: 100m
-            memory: 50Mi
-        args:
-        # command = "/skydns"
-        - -machines=http://127.0.0.1:4001
-        - -addr=0.0.0.0:53
-        - -ns-rotate=false
-        - -domain={{ pillar['dns_domain'] }}.
-        ports:
-        - containerPort: 53
-          name: dns
-          protocol: UDP
-        - containerPort: 53
-          name: dns-tcp
-          protocol: TCP
-      - name: healthz
-        image: gcr.io/google_containers/exechealthz:1.0
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 10m
-            memory: 20Mi
-          requests:
-            cpu: 10m
-            memory: 20Mi
-        args:
-        - -cmd=nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} 127.0.0.1 >/dev/null
-        - -port=8080
-        ports:
-        - containerPort: 8080
-          protocol: TCP
-      volumes:
-      - name: etcd-storage
-        emptyDir: {}
-      dnsPolicy: Default # Don't use cluster DNS.
cluster/juju/layers/kubernetes/templates/skydns-svc.yml (deleted, 20 lines)
@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "KubeDNS"
-spec:
-  selector:
-    k8s-app: kube-dns
-  clusterIP: {{ pillar['dns_server'] }}
-  ports:
-  - name: dns
-    port: 53
-    protocol: UDP
-  - name: dns-tcp
-    port: 53
-    protocol: TCP