Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 03:11:40 +00:00
Merge pull request #93836 from jayunit100/salt_cleanup_92835
remove __pillar__ refs
Commit: c1e5e6a556
@@ -67,7 +67,7 @@ data:
              lameduck 5s
          }
          ready
-         kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {
+         kubernetes __DNS__DOMAIN__ in-addr.arpa ip6.arpa {
              pods insecure
              fallthrough in-addr.arpa ip6.arpa
              ttl 30
@@ -136,7 +136,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: __PILLAR__DNS__MEMORY__LIMIT__
+            memory: __DNS__MEMORY__LIMIT__
           requests:
             cpu: 100m
             memory: 70Mi
@@ -202,7 +202,7 @@ metadata:
 spec:
   selector:
     k8s-app: kube-dns
-  clusterIP: __PILLAR__DNS__SERVER__
+  clusterIP: __DNS__SERVER__
   ports:
   - name: dns
     port: 53
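
These three hunks touch the base template that carries the double-underscore placeholders: the change is a mechanical token rename from __PILLAR__X__ to __X__, dropping the Salt-era prefix. As a hedged sanity check (not part of the commit), both spellings render identically once a cluster script substitutes them:

  # DNS_DOMAIN is assumed to be provided by the caller, e.g. cluster.local
  DNS_DOMAIN=cluster.local
  echo "kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {" \
    | sed -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g"
  echo "kubernetes __DNS__DOMAIN__ in-addr.arpa ip6.arpa {" \
    | sed -e "s/__DNS__DOMAIN__/${DNS_DOMAIN}/g"
  # both print: kubernetes cluster.local in-addr.arpa ip6.arpa {
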
@@ -67,7 +67,7 @@ data:
              lameduck 5s
          }
          ready
-         kubernetes {{ pillar['dns_domain'] }} in-addr.arpa ip6.arpa {
+         kubernetes dns_domain in-addr.arpa ip6.arpa {
              pods insecure
              fallthrough in-addr.arpa ip6.arpa
              ttl 30
@@ -136,7 +136,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: {{ pillar['dns_memory_limit'] }}
+            memory: 'dns_memory_limit'
           requests:
             cpu: 100m
             memory: 70Mi
@@ -202,7 +202,7 @@ metadata:
 spec:
   selector:
     k8s-app: kube-dns
-  clusterIP: {{ pillar['dns_server'] }}
+  clusterIP: 'dns_server'
   ports:
   - name: dns
     port: 53
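
In the .in variant, Jinja expressions such as {{ pillar['dns_domain'] }} become bare tokens (dns_domain, 'dns_server'); the provisioning scripts later in this diff substitute those tokens with sed. A hedged aside on why this simplifies matters: the Jinja form needed bracket escaping in every sed rule, while the bare token does not:

  # old pattern: brackets must be escaped (manifest.yaml is a hypothetical input)
  sed -e "s/{{ pillar\['dns_domain'\] }}/cluster.local/g" manifest.yaml
  # new pattern: a plain word
  sed -e "s/dns_domain/cluster.local/g" manifest.yaml
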
@@ -1,5 +1,5 @@
-s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
-s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
-s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
-s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
+s/__DNS__SERVER__/{{ pillar['dns_server'] }}/g
+s/__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
+s/__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
+s/__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
 s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
@@ -1,5 +1,5 @@
-s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
-s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
-s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
-s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
+s/__DNS__SERVER__/$DNS_SERVER_IP/g
+s/__DNS__DOMAIN__/$DNS_DOMAIN/g
+s/__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
+s/__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
 s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
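
The two transform files map the placeholders to their consumers: one rewrites the base template into the pillar/Jinja variant, the other into the shell-variable variant. The commit only edits the left-hand patterns. A usage sketch, with file names assumed from the addon layout (transforms2salt.sed, transforms2sed.sed):

  # generate the two derived manifests from the base template (sketch)
  sed -f transforms2salt.sed coredns.yaml.base > coredns.yaml.in
  sed -f transforms2sed.sed  coredns.yaml.base > coredns.yaml.sed
  # the .sed variant still contains literal $DNS_SERVER_IP etc.; one way to
  # expand it locally is envsubst from GNU gettext:
  DNS_SERVER_IP=10.0.0.10 DNS_DOMAIN=cluster.local \
    SERVICE_CLUSTER_IP_RANGE=10.0.0.0/16 DNS_MEMORY_LIMIT=170Mi \
    envsubst < coredns.yaml.sed > coredns.yaml
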
@@ -30,7 +30,7 @@ metadata:
 spec:
   selector:
     k8s-app: kube-dns
-  clusterIP: __PILLAR__DNS__SERVER__
+  clusterIP: __DNS__SERVER__
   ports:
   - name: dns
     port: 53
@@ -123,7 +123,7 @@ spec:
         # guaranteed class. Currently, this container falls into the
         # "burstable" category so the kubelet doesn't backoff from restarting it.
         limits:
-          memory: __PILLAR__DNS__MEMORY__LIMIT__
+          memory: __DNS__MEMORY__LIMIT__
         requests:
           cpu: 100m
           memory: 70Mi
@@ -146,7 +146,7 @@ spec:
           initialDelaySeconds: 3
           timeoutSeconds: 5
         args:
-        - --domain=__PILLAR__DNS__DOMAIN__.
+        - --domain=__DNS__DOMAIN__.
         - --dns-port=10053
         - --config-dir=/kube-dns-config
         - --v=2
@@ -193,7 +193,7 @@ spec:
         - --no-negcache
        - --dns-loop-detect
         - --log-facility=-
-        - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
+        - --server=/__DNS__DOMAIN__/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
         - --server=/ip6.arpa/127.0.0.1#10053
         ports:
@@ -232,8 +232,8 @@ spec:
         args:
         - --v=2
         - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__DNS__DOMAIN__,5,SRV
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__DNS__DOMAIN__,5,SRV
         ports:
         - containerPort: 10054
           name: metrics
@@ -30,7 +30,7 @@ metadata:
 spec:
   selector:
     k8s-app: kube-dns
-  clusterIP: {{ pillar['dns_server'] }}
+  clusterIP: dns_server
   ports:
   - name: dns
     port: 53
@@ -123,7 +123,7 @@ spec:
         # guaranteed class. Currently, this container falls into the
         # "burstable" category so the kubelet doesn't backoff from restarting it.
         limits:
-          memory: {{ pillar['dns_memory_limit'] }}
+          memory: 'dns_memory_limit'
         requests:
           cpu: 100m
           memory: 70Mi
@@ -146,7 +146,7 @@ spec:
           initialDelaySeconds: 3
           timeoutSeconds: 5
         args:
-        - --domain={{ pillar['dns_domain'] }}.
+        - --domain=dns_domain.
         - --dns-port=10053
         - --config-dir=/kube-dns-config
         - --v=2
@@ -193,7 +193,7 @@ spec:
         - --no-negcache
         - --dns-loop-detect
         - --log-facility=-
-        - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
+        - --server=/dns_domain/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
         - --server=/ip6.arpa/127.0.0.1#10053
         ports:
@@ -232,8 +232,8 @@ spec:
         args:
         - --v=2
         - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.dns_domain,5,SRV
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.dns_domain,5,SRV
         ports:
         - containerPort: 10054
           name: metrics
@@ -1,5 +1,5 @@
-s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
-s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
-s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
-s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
+s/__DNS__SERVER__/{{ pillar['dns_server'] }}/g
+s/__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
+s/__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
+s/__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
 s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
@@ -1,5 +1,5 @@
-s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
-s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
-s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
-s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
+s/__DNS__SERVER__/$DNS_SERVER_IP/g
+s/__DNS__DOMAIN__/$DNS_DOMAIN/g
+s/__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
+s/__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
 s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
@@ -2394,10 +2394,10 @@ function setup-coredns-manifest {
   local -r coredns_file="${dst_dir}/0-dns/coredns/coredns.yaml"
   mv "${dst_dir}/0-dns/coredns/coredns.yaml.in" "${coredns_file}"
   # Replace the salt configurations with variable values.
-  sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
-  sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
+  sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${coredns_file}"
+  sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${coredns_file}"
   sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
-  sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT:-170Mi}@g" "${coredns_file}"
+  sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${coredns_file}"

   if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
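
Note that the new left-hand patterns are bare words: s@dns_domain@...@g rewrites every occurrence of dns_domain in the file, not just a delimited placeholder. That is safe only because the .in templates above now carry the token exactly where a value belongs. A hedged demonstration of the behavior:

  # a bare-word pattern replaces all occurrences, even inside comments
  echo "clusterIP: 'dns_server' # set dns_server first" \
    | sed -e "s@dns_server@10.0.0.10@g"
  # -> clusterIP: '10.0.0.10' # set 10.0.0.10 first
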
@@ -2447,9 +2447,9 @@ EOF
     update-prometheus-to-sd-parameters "${kubedns_file}"
   fi
   # Replace the salt configurations with variable values.
-  sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
-  sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
-  sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT:-170Mi}@g" "${kubedns_file}"
+  sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${kubedns_file}"
+  sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${kubedns_file}"
+  sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${kubedns_file}"

   if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
@@ -2464,9 +2464,9 @@ function setup-nodelocaldns-manifest {
   local -r localdns_file="${dst_dir}/0-dns/nodelocaldns/nodelocaldns.yaml"
   setup-addon-custom-yaml "addons" "0-dns/nodelocaldns" "nodelocaldns.yaml" "${CUSTOM_NODELOCAL_DNS_YAML:-}"
   # Replace the sed configurations with variable values.
-  sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
-  sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
-  sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
+  sed -i -e "s/_.*_DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
+  sed -i -e "s/_.*_DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
+  sed -i -e "s/_.*_LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
 }

 # Sets up the manifests of netd for k8s addons.
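
The nodelocaldns substitutions take a different route: the wildcard pattern _.*_DNS__DOMAIN__ matches both the old __PILLAR__-prefixed token and the new shorter one, so either vintage of nodelocaldns.yaml renders correctly. Because .* is greedy, this implicitly assumes at most one token per line, as a hedged sed session shows:

  # both token spellings match the wildcard pattern:
  echo "kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa" \
    | sed -e "s/_.*_DNS__DOMAIN__/cluster.local/g"
  echo "kubernetes __DNS__DOMAIN__ in-addr.arpa" \
    | sed -e "s/_.*_DNS__DOMAIN__/cluster.local/g"
  # both print: kubernetes cluster.local in-addr.arpa

  # but .* is greedy, so two tokens on one line collapse into a single match:
  echo "__PILLAR__DNS__DOMAIN__ or __DNS__DOMAIN__" \
    | sed -e "s/_.*_DNS__DOMAIN__/cluster.local/g"
  # -> cluster.local
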
@@ -886,22 +886,28 @@ EOF
 function start_kubedns {
   if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
     cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml
-    ${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
-    ${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
-    ${SED} -i -e "s/{{ pillar\['dns_memory_limit'\] }}/${DNS_MEMORY_LIMIT}/g" kube-dns.yaml
+    ${SED} -i -e "s/dns_domain/${DNS_DOMAIN}/g" kube-dns.yaml
+    ${SED} -i -e "s/dns_server/${DNS_SERVER_IP}/g" kube-dns.yaml
+    ${SED} -i -e "s/dns_memory_limit/${DNS_MEMORY_LIMIT}/g" kube-dns.yaml
     # TODO update to dns role once we have one.
     # use kubectl to create kubedns addon
-    ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml
-    echo "Kube-dns addon successfully deployed."
+    if ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml ; then
+      echo "Kube-dns addon successfully deployed."
+    else
+      echo "Something is wrong with your DNS input"
+      cat kube-dns.yaml
+      exit 1
+    fi
     rm kube-dns.yaml
   fi
 }

 function start_nodelocaldns {
   cp "${KUBE_ROOT}/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml" nodelocaldns.yaml
-  ${SED} -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" nodelocaldns.yaml
-  ${SED} -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" nodelocaldns.yaml
-  ${SED} -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" nodelocaldns.yaml
+  # .* because of the __PILLLAR__ references that eventually will be removed
+  ${SED} -i -e "s/_.*_DNS__DOMAIN__/${DNS_DOMAIN}/g" nodelocaldns.yaml
+  ${SED} -i -e "s/_.*_DNS__SERVER__/${DNS_SERVER_IP}/g" nodelocaldns.yaml
+  ${SED} -i -e "s/_.*_LOCAL__DNS__/${LOCAL_DNS_IP}/g" nodelocaldns.yaml
   # use kubectl to create nodelocaldns addon
   ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f nodelocaldns.yaml
   echo "NodeLocalDNS addon successfully deployed."
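
The start_kubedns change wraps kubectl create in an error check and dumps the rendered manifest when creation fails. A hedged alternative sketch (not in the commit): newer kubectl releases can validate the rendered YAML client-side before anything is submitted:

  # validate first, then create (assumes kubectl >= 1.18 for --dry-run=client)
  if ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system \
       create --dry-run=client -f kube-dns.yaml >/dev/null; then
    ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system \
      create -f kube-dns.yaml
  else
    echo "kube-dns.yaml failed client-side validation:" >&2
    cat kube-dns.yaml >&2
    exit 1
  fi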