remove the deprecated kube-dns as an option in kubeadm

Sandeep Rajan 2021-02-08 15:53:29 -05:00
parent 35c233f18d
commit b8a1bd6a6c
18 changed files with 206 additions and 1364 deletions

View File

@ -156,9 +156,6 @@ type DNSAddOnType string
const (
// CoreDNS add-on type
CoreDNS DNSAddOnType = "CoreDNS"
// KubeDNS add-on type
KubeDNS DNSAddOnType = "kube-dns"
)
// DNS defines the DNS addon that should be used in the cluster

View File

@ -145,9 +145,6 @@ type DNSAddOnType string
const (
// CoreDNS add-on type
CoreDNS DNSAddOnType = "CoreDNS"
// KubeDNS add-on type
KubeDNS DNSAddOnType = "kube-dns"
)
// DNS defines the DNS addon that should be used in the cluster

View File

@ -141,9 +141,6 @@ type DNSAddOnType string
const (
// CoreDNS add-on type
CoreDNS DNSAddOnType = "CoreDNS"
// KubeDNS add-on type
KubeDNS DNSAddOnType = "kube-dns"
)
// DNS defines the DNS addon that should be used in the cluster
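The hunks above drop the KubeDNS constant from the kubeadm API type definitions, leaving CoreDNS as the only DNSAddOnType value. As a minimal, self-contained sketch (hypothetical names, not kubeadm's actual validation code), any remaining validation of the DNS type now reduces to a single accepted value:

package main

import "fmt"

// DNSAddOnType mirrors the API type shown above; CoreDNS is now the only value.
type DNSAddOnType string

const CoreDNS DNSAddOnType = "CoreDNS"

// validateDNSType is a hypothetical stand-in for whatever validation consumes the type.
func validateDNSType(t DNSAddOnType) error {
	if t != CoreDNS {
		return fmt.Errorf("unsupported DNS add-on type %q: only %q is supported", t, CoreDNS)
	}
	return nil
}

func main() {
	fmt.Println(validateDNSType(CoreDNS))    // <nil>
	fmt.Println(validateDNSType("kube-dns")) // unsupported DNS add-on type "kube-dns": only "CoreDNS" is supported
}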

View File

@ -175,16 +175,6 @@ func TestConfigImagesListRunWithoutPath(t *testing.T) {
},
expectedImages: defaultNumberOfImages,
},
{
name: "kube-dns enabled",
cfg: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: dummyKubernetesVersionStr,
DNS: kubeadmapiv1beta2.DNS{
Type: kubeadmapiv1beta2.KubeDNS,
},
},
expectedImages: defaultNumberOfImages + 2,
},
}
outputFlags := output.NewOutputFlags(&imageTextPrintFlags{}).WithTypeSetter(outputapischeme.Scheme).WithDefaultOutput(output.TextOutput)

View File

@ -104,7 +104,7 @@ func newCmdApply(apf *applyPlanFlags) *cobra.Command {
// - Upgrades the control plane components
// - Applies the other resources that'd be created with kubeadm init as well, like
// - Creating the RBAC rules for the bootstrap tokens and the cluster-info ConfigMap
// - Applying new kube-dns and kube-proxy manifests
// - Applying new CoreDNS and kube-proxy manifests
// - Uploads the newly used configuration to the cluster ConfigMap
func runApply(flags *applyFlags, args []string) error {

View File

@ -177,7 +177,6 @@ func genUpgradePlan(up *upgrade.Upgrade, isExternalEtcd bool) (*outputapi.Upgrad
components = append(components, newComponentUpgradePlan(constants.KubeProxy, up.Before.KubeVersion, up.After.KubeVersion))
components = appendDNSComponent(components, up, kubeadmapi.CoreDNS, constants.CoreDNS)
components = appendDNSComponent(components, up, kubeadmapi.KubeDNS, constants.KubeDNS)
if !isExternalEtcd {
components = append(components, newComponentUpgradePlan(constants.Etcd, up.Before.EtcdVersion, up.After.EtcdVersion))

View File

@ -79,45 +79,45 @@ func TestPrintAvailableUpgrades(t *testing.T) {
name: "Patch version available",
upgrades: []upgrade.Upgrade{
{
Description: "version in the v1.8 series",
Description: "version in the v1.18 series",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.1",
KubeVersion: "v1.18.1",
KubeletVersions: map[string]uint16{
"v1.8.1": 1,
"v1.18.1": 1,
},
KubeadmVersion: "v1.8.2",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.1",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.8.3",
KubeadmVersion: "v1.8.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeVersion: "v1.18.4",
KubeadmVersion: "v1.18.4",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.1 v1.8.3
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.1 v1.18.4
Upgrade to the latest version in the v1.8 series:
Upgrade to the latest version in the v1.18 series:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.1 v1.8.3
kube-controller-manager v1.8.1 v1.8.3
kube-scheduler v1.8.1 v1.8.3
kube-proxy v1.8.1 v1.8.3
kube-dns 1.14.5 1.14.5
etcd 3.0.17 3.0.17
kube-apiserver v1.18.1 v1.18.4
kube-controller-manager v1.18.1 v1.18.4
kube-scheduler v1.18.1 v1.18.4
kube-proxy v1.18.1 v1.18.4
CoreDNS 1.6.7 1.6.7
etcd 3.4.3-0 3.4.3-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.8.3
kubeadm upgrade apply v1.18.4
Note: Before you can perform this upgrade, you have to update kubeadm to v1.8.3.
Note: Before you can perform this upgrade, you have to update kubeadm to v1.18.4.
_____________________________________________________________________
@ -129,41 +129,43 @@ _____________________________________________________________________
{
Description: "stable version",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.3",
KubeVersion: "v1.18.4",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
"v1.18.4": 1,
},
KubeadmVersion: "v1.9.0",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.4",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.0",
KubeadmVersion: "v1.9.0",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.13",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.0",
KubeadmVersion: "v1.19.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.3 v1.9.0
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.4 v1.19.0
Upgrade to the latest stable version:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.3 v1.9.0
kube-controller-manager v1.8.3 v1.9.0
kube-scheduler v1.8.3 v1.9.0
kube-proxy v1.8.3 v1.9.0
kube-dns 1.14.5 1.14.13
etcd 3.0.17 3.1.12
kube-apiserver v1.18.4 v1.19.0
kube-controller-manager v1.18.4 v1.19.0
kube-scheduler v1.18.4 v1.19.0
kube-proxy v1.18.4 v1.19.0
CoreDNS 1.6.7 1.7.0
etcd 3.4.3-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.0
kubeadm upgrade apply v1.19.0
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.0.
_____________________________________________________________________
@ -173,85 +175,85 @@ _____________________________________________________________________
name: "patch and minor version available",
upgrades: []upgrade.Upgrade{
{
Description: "version in the v1.8 series",
Description: "version in the v1.18 series",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.3",
KubeVersion: "v1.18.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
"v1.18.3": 1,
},
KubeadmVersion: "v1.8.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.3",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.8.5",
KubeadmVersion: "v1.8.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeVersion: "v1.18.5",
KubeadmVersion: "v1.18.3",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
},
{
Description: "stable version",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.3",
KubeVersion: "v1.18.3",
KubeletVersions: map[string]uint16{
"v1.8.3": 1,
"v1.18.3": 1,
},
KubeadmVersion: "v1.8.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.3",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.0",
KubeadmVersion: "v1.9.0",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.13",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.0",
KubeadmVersion: "v1.19.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.3 v1.8.5
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.3 v1.18.5
Upgrade to the latest version in the v1.8 series:
Upgrade to the latest version in the v1.18 series:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.3 v1.8.5
kube-controller-manager v1.8.3 v1.8.5
kube-scheduler v1.8.3 v1.8.5
kube-proxy v1.8.3 v1.8.5
kube-dns 1.14.5 1.14.5
etcd 3.0.17 3.0.17
kube-apiserver v1.18.3 v1.18.5
kube-controller-manager v1.18.3 v1.18.5
kube-scheduler v1.18.3 v1.18.5
kube-proxy v1.18.3 v1.18.5
CoreDNS 1.6.7 1.6.7
etcd 3.4.3-0 3.4.3-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.8.5
kubeadm upgrade apply v1.18.5
_____________________________________________________________________
Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.3 v1.9.0
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.3 v1.19.0
Upgrade to the latest stable version:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.3 v1.9.0
kube-controller-manager v1.8.3 v1.9.0
kube-scheduler v1.8.3 v1.9.0
kube-proxy v1.8.3 v1.9.0
kube-dns 1.14.5 1.14.13
etcd 3.0.17 3.1.12
kube-apiserver v1.18.3 v1.19.0
kube-controller-manager v1.18.3 v1.19.0
kube-scheduler v1.18.3 v1.19.0
kube-proxy v1.18.3 v1.19.0
CoreDNS 1.6.7 1.7.0
etcd 3.4.3-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.0
kubeadm upgrade apply v1.19.0
Note: Before you can perform this upgrade, you have to update kubeadm to v1.9.0.
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.0.
_____________________________________________________________________
@ -263,43 +265,43 @@ _____________________________________________________________________
{
Description: "experimental version",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.18.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.18.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.5",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.0-beta.1",
KubeadmVersion: "v1.9.0-beta.1",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.13",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.0-beta.1",
KubeadmVersion: "v1.19.0-beta.1",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.5 v1.9.0-beta.1
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.5 v1.19.0-beta.1
Upgrade to the latest experimental version:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.5 v1.9.0-beta.1
kube-controller-manager v1.8.5 v1.9.0-beta.1
kube-scheduler v1.8.5 v1.9.0-beta.1
kube-proxy v1.8.5 v1.9.0-beta.1
kube-dns 1.14.5 1.14.13
etcd 3.0.17 3.1.12
kube-apiserver v1.18.5 v1.19.0-beta.1
kube-controller-manager v1.18.5 v1.19.0-beta.1
kube-scheduler v1.18.5 v1.19.0-beta.1
kube-proxy v1.18.5 v1.19.0-beta.1
CoreDNS 1.6.7 1.7.0
etcd 3.4.3-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.0-beta.1 --allow-experimental-upgrades
kubeadm upgrade apply v1.19.0-beta.1 --allow-experimental-upgrades
Note: Before you can perform this upgrade, you have to update kubeadm to v1.9.0-beta.1.
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.0-beta.1.
_____________________________________________________________________
@ -311,43 +313,43 @@ _____________________________________________________________________
{
Description: "release candidate version",
Before: upgrade.ClusterState{
KubeVersion: "v1.8.5",
KubeVersion: "v1.18.5",
KubeletVersions: map[string]uint16{
"v1.8.5": 1,
"v1.18.5": 1,
},
KubeadmVersion: "v1.8.5",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.18.5",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.6.7",
EtcdVersion: "3.4.3-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.0-rc.1",
KubeadmVersion: "v1.9.0-rc.1",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.13",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.0-rc.1",
KubeadmVersion: "v1.19.0-rc.1",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.8.5 v1.9.0-rc.1
COMPONENT CURRENT TARGET
kubelet 1 x v1.18.5 v1.19.0-rc.1
Upgrade to the latest release candidate version:
COMPONENT CURRENT TARGET
kube-apiserver v1.8.5 v1.9.0-rc.1
kube-controller-manager v1.8.5 v1.9.0-rc.1
kube-scheduler v1.8.5 v1.9.0-rc.1
kube-proxy v1.8.5 v1.9.0-rc.1
kube-dns 1.14.5 1.14.13
etcd 3.0.17 3.1.12
kube-apiserver v1.18.5 v1.19.0-rc.1
kube-controller-manager v1.18.5 v1.19.0-rc.1
kube-scheduler v1.18.5 v1.19.0-rc.1
kube-proxy v1.18.5 v1.19.0-rc.1
CoreDNS 1.6.7 1.7.0
etcd 3.4.3-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.0-rc.1 --allow-release-candidate-upgrades
kubeadm upgrade apply v1.19.0-rc.1 --allow-release-candidate-upgrades
Note: Before you can perform this upgrade, you have to update kubeadm to v1.9.0-rc.1.
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.0-rc.1.
_____________________________________________________________________
@ -357,47 +359,47 @@ _____________________________________________________________________
name: "multiple kubelet versions",
upgrades: []upgrade.Upgrade{
{
Description: "version in the v1.9 series",
Description: "version in the v1.19 series",
Before: upgrade.ClusterState{
KubeVersion: "v1.9.2",
KubeVersion: "v1.19.2",
KubeletVersions: map[string]uint16{
"v1.9.2": 1,
"v1.9.3": 2,
"v1.19.2": 1,
"v1.19.3": 2,
},
KubeadmVersion: "v1.9.2",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.19.2",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.3",
KubeadmVersion: "v1.9.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.8",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.3",
KubeadmVersion: "v1.19.3",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.9.2 v1.9.3
2 x v1.9.3 v1.9.3
COMPONENT CURRENT TARGET
kubelet 1 x v1.19.2 v1.19.3
2 x v1.19.3 v1.19.3
Upgrade to the latest version in the v1.9 series:
Upgrade to the latest version in the v1.19 series:
COMPONENT CURRENT TARGET
kube-apiserver v1.9.2 v1.9.3
kube-controller-manager v1.9.2 v1.9.3
kube-scheduler v1.9.2 v1.9.3
kube-proxy v1.9.2 v1.9.3
kube-dns 1.14.5 1.14.8
etcd 3.0.17 3.1.12
kube-apiserver v1.19.2 v1.19.3
kube-controller-manager v1.19.2 v1.19.3
kube-scheduler v1.19.2 v1.19.3
kube-proxy v1.19.2 v1.19.3
CoreDNS 1.7.0 1.7.0
etcd 3.4.7-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.3
kubeadm upgrade apply v1.19.3
Note: Before you can perform this upgrade, you have to update kubeadm to v1.9.3.
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.3.
_____________________________________________________________________
@ -408,185 +410,45 @@ _____________________________________________________________________
name: "external etcd upgrade available",
upgrades: []upgrade.Upgrade{
{
Description: "version in the v1.9 series",
Description: "version in the v1.19 series",
Before: upgrade.ClusterState{
KubeVersion: "v1.9.2",
KubeVersion: "v1.19.2",
KubeletVersions: map[string]uint16{
"v1.9.2": 1,
"v1.19.2": 1,
},
KubeadmVersion: "v1.9.2",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.5",
EtcdVersion: "3.0.17",
KubeadmVersion: "v1.19.2",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
After: upgrade.ClusterState{
KubeVersion: "v1.9.3",
KubeadmVersion: "v1.9.3",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.8",
EtcdVersion: "3.1.12",
KubeVersion: "v1.19.3",
KubeadmVersion: "v1.19.3",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.7.0",
EtcdVersion: "3.4.7-0",
},
},
},
externalEtcd: true,
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.9.2 v1.9.3
Upgrade to the latest version in the v1.9 series:
COMPONENT CURRENT TARGET
kube-apiserver v1.9.2 v1.9.3
kube-controller-manager v1.9.2 v1.9.3
kube-scheduler v1.9.2 v1.9.3
kube-proxy v1.9.2 v1.9.3
kube-dns 1.14.5 1.14.8
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.9.3
Note: Before you can perform this upgrade, you have to update kubeadm to v1.9.3.
_____________________________________________________________________
`),
},
{
name: "kubedns to coredns",
upgrades: []upgrade.Upgrade{
{
Description: "kubedns to coredns",
Before: upgrade.ClusterState{
KubeVersion: "v1.10.2",
KubeletVersions: map[string]uint16{
"v1.10.2": 1,
},
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.7",
EtcdVersion: "3.1.11",
},
After: upgrade.ClusterState{
KubeVersion: "v1.11.0",
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.0.6",
EtcdVersion: "3.2.18",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.10.2 v1.11.0
kubelet 1 x v1.19.2 v1.19.3
Upgrade to the latest kubedns to coredns:
Upgrade to the latest version in the v1.19 series:
COMPONENT CURRENT TARGET
kube-apiserver v1.10.2 v1.11.0
kube-controller-manager v1.10.2 v1.11.0
kube-scheduler v1.10.2 v1.11.0
kube-proxy v1.10.2 v1.11.0
CoreDNS 1.0.6
kube-dns 1.14.7
etcd 3.1.11 3.2.18
kube-apiserver v1.19.2 v1.19.3
kube-controller-manager v1.19.2 v1.19.3
kube-scheduler v1.19.2 v1.19.3
kube-proxy v1.19.2 v1.19.3
CoreDNS 1.7.0 1.7.0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.11.0
kubeadm upgrade apply v1.19.3
_____________________________________________________________________
`),
},
{
name: "coredns",
upgrades: []upgrade.Upgrade{
{
Description: "coredns",
Before: upgrade.ClusterState{
KubeVersion: "v1.10.2",
KubeletVersions: map[string]uint16{
"v1.10.2": 1,
},
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.0.5",
EtcdVersion: "3.1.11",
},
After: upgrade.ClusterState{
KubeVersion: "v1.11.0",
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.0.6",
EtcdVersion: "3.2.18",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.10.2 v1.11.0
Upgrade to the latest coredns:
COMPONENT CURRENT TARGET
kube-apiserver v1.10.2 v1.11.0
kube-controller-manager v1.10.2 v1.11.0
kube-scheduler v1.10.2 v1.11.0
kube-proxy v1.10.2 v1.11.0
CoreDNS 1.0.5 1.0.6
etcd 3.1.11 3.2.18
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.11.0
_____________________________________________________________________
`),
},
{
name: "coredns to kubedns",
upgrades: []upgrade.Upgrade{
{
Description: "coredns to kubedns",
Before: upgrade.ClusterState{
KubeVersion: "v1.10.2",
KubeletVersions: map[string]uint16{
"v1.10.2": 1,
},
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.CoreDNS,
DNSVersion: "1.0.6",
EtcdVersion: "3.1.11",
},
After: upgrade.ClusterState{
KubeVersion: "v1.11.0",
KubeadmVersion: "v1.11.0",
DNSType: kubeadmapi.KubeDNS,
DNSVersion: "1.14.9",
EtcdVersion: "3.2.18",
},
},
},
expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT TARGET
kubelet 1 x v1.10.2 v1.11.0
Upgrade to the latest coredns to kubedns:
COMPONENT CURRENT TARGET
kube-apiserver v1.10.2 v1.11.0
kube-controller-manager v1.10.2 v1.11.0
kube-scheduler v1.10.2 v1.11.0
kube-proxy v1.10.2 v1.11.0
CoreDNS 1.0.6
kube-dns 1.14.9
etcd 3.1.11 3.2.18
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.11.0
Note: Before you can perform this upgrade, you have to update kubeadm to v1.19.3.
_____________________________________________________________________

View File

@ -302,8 +302,6 @@ const (
HyperKube = "hyperkube"
// CoreDNS defines variable used internally when referring to the CoreDNS component
CoreDNS = "CoreDNS"
// KubeDNS defines variable used internally when referring to the KubeDNS component
KubeDNS = "kube-dns"
// Kubelet defines variable used internally when referring to the Kubelet
Kubelet = "kubelet"
@ -328,24 +326,6 @@ const (
// CoreDNSImageName specifies the name of the image for CoreDNS add-on
CoreDNSImageName = "coredns/coredns"
// KubeDNSConfigMap specifies in what ConfigMap in the kube-system namespace the kube-dns config should be stored
KubeDNSConfigMap = "kube-dns"
// KubeDNSDeploymentName specifies the name of the Deployment for kube-dns add-on
KubeDNSDeploymentName = "kube-dns"
// KubeDNSKubeDNSImageName specifies the name of the image for the kubedns container in the kube-dns add-on
KubeDNSKubeDNSImageName = "k8s-dns-kube-dns"
// KubeDNSSidecarImageName specifies the name of the image for the sidecar container in the kube-dns add-on
KubeDNSSidecarImageName = "k8s-dns-sidecar"
// KubeDNSDnsMasqNannyImageName specifies the name of the image for the dnsmasq container in the kube-dns add-on
KubeDNSDnsMasqNannyImageName = "k8s-dns-dnsmasq-nanny"
// KubeDNSVersion is the version of kube-dns to be deployed if it is used
KubeDNSVersion = "1.14.13"
// CoreDNSVersion is the version of CoreDNS to be deployed if it is used
CoreDNSVersion = "v1.8.0"
@ -654,12 +634,10 @@ func GetAPIServerVirtualIP(svcSubnetList string, isDualStack bool) (net.IP, erro
// GetDNSVersion is a handy function that returns the DNS version by DNS type
func GetDNSVersion(dnsType kubeadmapi.DNSAddOnType) string {
switch dnsType {
case kubeadmapi.KubeDNS:
return KubeDNSVersion
default:
if dnsType == kubeadmapi.CoreDNS {
return CoreDNSVersion
}
return ""
}
// GetKubeletConfigMapName returns the right ConfigMap name for the right branch of k8s
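The GetDNSVersion hunk above replaces the switch with a plain if, so only CoreDNS maps to a pinned version. A self-contained sketch of the resulting behavior (stand-in types; the CoreDNSVersion value is taken from the constants hunk above):

package main

import "fmt"

type DNSAddOnType string

const (
	CoreDNS        DNSAddOnType = "CoreDNS"
	CoreDNSVersion              = "v1.8.0" // value from the constants hunk above
)

// GetDNSVersion now only knows about CoreDNS; any other type yields an empty string.
func GetDNSVersion(dnsType DNSAddOnType) string {
	if dnsType == CoreDNS {
		return CoreDNSVersion
	}
	return ""
}

func main() {
	fmt.Println(GetDNSVersion(CoreDNS))    // v1.8.0
	fmt.Println(GetDNSVersion("kube-dns")) // "" (kube-dns is no longer supported)
}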

View File

@ -172,15 +172,11 @@ func TestEtcdSupportedVersion(t *testing.T) {
}
}
func TestGetKubeDNSVersion(t *testing.T) {
func TestGetDNSVersion(t *testing.T) {
var tests = []struct {
dns kubeadmapi.DNSAddOnType
expected string
}{
{
dns: kubeadmapi.KubeDNS,
expected: KubeDNSVersion,
},
{
dns: kubeadmapi.CoreDNS,
expected: CoreDNSVersion,

View File

@ -44,9 +44,8 @@ func GetKubernetesImage(image string, cfg *kubeadmapi.ClusterConfiguration) stri
return GetGenericImage(repoPrefix, image, kubernetesImageTag)
}
// GetDNSImage generates and returns the image for the DNS, that can be CoreDNS or kube-dns.
// Given that kube-dns uses 3 containers, an additional imageName parameter was added
func GetDNSImage(cfg *kubeadmapi.ClusterConfiguration, imageName string) string {
// GetDNSImage generates and returns the image for CoreDNS.
func GetDNSImage(cfg *kubeadmapi.ClusterConfiguration) string {
// DNS uses default image repository by default
dnsImageRepository := cfg.ImageRepository
// unless an override is specified
@ -60,7 +59,7 @@ func GetDNSImage(cfg *kubeadmapi.ClusterConfiguration, imageName string) string
if cfg.DNS.ImageTag != "" {
dnsImageTag = cfg.DNS.ImageTag
}
return GetGenericImage(dnsImageRepository, imageName, dnsImageTag)
return GetGenericImage(dnsImageRepository, constants.CoreDNSImageName, dnsImageTag)
}
// GetEtcdImage generates and returns the image for etcd
@ -112,11 +111,7 @@ func GetControlPlaneImages(cfg *kubeadmapi.ClusterConfiguration) []string {
// Append the appropriate DNS images
if cfg.DNS.Type == kubeadmapi.CoreDNS {
imgs = append(imgs, GetDNSImage(cfg, constants.CoreDNSImageName))
} else {
imgs = append(imgs, GetDNSImage(cfg, constants.KubeDNSKubeDNSImageName))
imgs = append(imgs, GetDNSImage(cfg, constants.KubeDNSSidecarImageName))
imgs = append(imgs, GetDNSImage(cfg, constants.KubeDNSDnsMasqNannyImageName))
imgs = append(imgs, GetDNSImage(cfg))
}
return imgs
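With the kube-dns containers gone, GetDNSImage no longer needs an imageName parameter and GetControlPlaneImages appends exactly one DNS image. A rough, self-contained sketch of the composition (hypothetical stand-in types; kubeadm's GetGenericImage is assumed to join repository, image name, and tag):

package main

import "fmt"

const (
	coreDNSImageName = "coredns/coredns" // CoreDNSImageName from the constants hunk above
	coreDNSVersion   = "v1.8.0"          // CoreDNSVersion from the constants hunk above
)

// dnsConfig and clusterConfig are hypothetical stand-ins for the kubeadm API structs.
type dnsConfig struct {
	ImageRepository string // per-addon repository override
	ImageTag        string // per-addon tag override
}

type clusterConfig struct {
	ImageRepository string
	DNS             dnsConfig
}

// getDNSImage mirrors the simplified logic: the repository (with optional override),
// the single CoreDNS image name, and the default version unless a tag is set.
func getDNSImage(cfg clusterConfig) string {
	repo := cfg.ImageRepository
	if cfg.DNS.ImageRepository != "" {
		repo = cfg.DNS.ImageRepository
	}
	tag := coreDNSVersion
	if cfg.DNS.ImageTag != "" {
		tag = cfg.DNS.ImageTag
	}
	return fmt.Sprintf("%s/%s:%s", repo, coreDNSImageName, tag)
}

func main() {
	fmt.Println(getDNSImage(clusterConfig{ImageRepository: "k8s.gcr.io"}))
	// k8s.gcr.io/coredns/coredns:v1.8.0
}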

View File

@ -224,33 +224,6 @@ func TestGetAllImages(t *testing.T) {
},
expect: constants.CoreDNSImageName,
},
{
name: "main kube-dns image is returned",
cfg: &kubeadmapi.ClusterConfiguration{
DNS: kubeadmapi.DNS{
Type: kubeadmapi.KubeDNS,
},
},
expect: constants.KubeDNSKubeDNSImageName,
},
{
name: "kube-dns sidecar image is returned",
cfg: &kubeadmapi.ClusterConfiguration{
DNS: kubeadmapi.DNS{
Type: kubeadmapi.KubeDNS,
},
},
expect: constants.KubeDNSSidecarImageName,
},
{
name: "kube-dns dnsmasq-nanny image is returned",
cfg: &kubeadmapi.ClusterConfiguration{
DNS: kubeadmapi.DNS{
Type: kubeadmapi.KubeDNS,
},
},
expect: constants.KubeDNSDnsMasqNannyImageName,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {

View File

@ -18,12 +18,9 @@ package dns
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"github.com/caddyserver/caddy/caddyfile"
"github.com/coredns/corefile-migration/migration"
"github.com/pkg/errors"
apps "k8s.io/api/apps/v1"
@ -42,17 +39,11 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
utilsnet "k8s.io/utils/net"
)
const (
// KubeDNSServiceAccountName describes the name of the ServiceAccount for the kube-dns addon
KubeDNSServiceAccountName = "kube-dns"
kubeDNSStubDomain = "stubDomains"
kubeDNSUpstreamNameservers = "upstreamNameservers"
unableToDecodeCoreDNS = "unable to decode CoreDNS"
coreDNSReplicas = 2
kubeDNSReplicas = 1
unableToDecodeCoreDNS = "unable to decode CoreDNS"
coreDNSReplicas = 2
)
// DeployedDNSAddon returns the type of DNS addon currently deployed
@ -67,11 +58,7 @@ func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, stri
case 0:
return "", "", nil
case 1:
addonName := deployments.Items[0].Name
addonType := kubeadmapi.CoreDNS
if addonName == kubeadmconstants.KubeDNSDeploymentName {
addonType = kubeadmapi.KubeDNS
}
addonImage := deployments.Items[0].Spec.Template.Spec.Containers[0].Image
addonImageParts := strings.Split(addonImage, ":")
addonVersion := addonImageParts[len(addonImageParts)-1]
@ -98,101 +85,13 @@ func deployedDNSReplicas(client clientset.Interface, replicas int32) (*int32, er
}
}
// EnsureDNSAddon creates the kube-dns or CoreDNS addon
// EnsureDNSAddon creates the CoreDNS addon
func EnsureDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
if cfg.DNS.Type == kubeadmapi.CoreDNS {
replicas, err := deployedDNSReplicas(client, coreDNSReplicas)
if err != nil {
return err
}
return coreDNSAddon(cfg, client, replicas)
}
replicas, err := deployedDNSReplicas(client, kubeDNSReplicas)
replicas, err := deployedDNSReplicas(client, coreDNSReplicas)
if err != nil {
return err
}
return kubeDNSAddon(cfg, client, replicas)
}
func kubeDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, replicas *int32) error {
if err := CreateServiceAccount(client); err != nil {
return err
}
dnsip, err := kubeadmconstants.GetDNSIP(cfg.Networking.ServiceSubnet, features.Enabled(cfg.FeatureGates, features.IPv6DualStack))
if err != nil {
return err
}
var dnsBindAddr, dnsProbeAddr string
if utilsnet.IsIPv6(dnsip) {
dnsBindAddr = "::1"
dnsProbeAddr = "[" + dnsBindAddr + "]"
} else {
dnsBindAddr = "127.0.0.1"
dnsProbeAddr = dnsBindAddr
}
dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment,
struct {
DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, OldControlPlaneTaintKey, ControlPlaneTaintKey string
Replicas *int32
}{
DeploymentName: kubeadmconstants.KubeDNSDeploymentName,
KubeDNSImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSKubeDNSImageName),
DNSMasqImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSDnsMasqNannyImageName),
SidecarImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSSidecarImageName),
DNSBindAddr: dnsBindAddr,
DNSProbeAddr: dnsProbeAddr,
DNSDomain: cfg.Networking.DNSDomain,
// TODO: https://github.com/kubernetes/kubeadm/issues/2200
OldControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleOldControlPlane,
ControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleControlPlane,
Replicas: replicas,
})
if err != nil {
return errors.Wrap(err, "error when parsing kube-dns deployment template")
}
dnsServiceBytes, err := kubeadmutil.ParseTemplate(KubeDNSService, struct{ DNSIP string }{
DNSIP: dnsip.String(),
})
if err != nil {
return errors.Wrap(err, "error when parsing kube-proxy configmap template")
}
if err := createKubeDNSAddon(dnsDeploymentBytes, dnsServiceBytes, client); err != nil {
return err
}
fmt.Println("[addons] WARNING: kube-dns is deprecated and will not be supported in a future version")
fmt.Println("[addons] Applied essential addon: kube-dns")
return nil
}
// CreateServiceAccount creates the necessary serviceaccounts that kubeadm uses/might use, if they don't already exist.
func CreateServiceAccount(client clientset.Interface) error {
return apiclient.CreateOrUpdateServiceAccount(client, &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: KubeDNSServiceAccountName,
Namespace: metav1.NamespaceSystem,
},
})
}
func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.Interface) error {
kubednsDeployment := &apps.Deployment{}
if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, kubednsDeployment); err != nil {
return errors.Wrap(err, "unable to decode kube-dns deployment")
}
// Create the Deployment for kube-dns or update it in case it already exists
if err := apiclient.CreateOrUpdateDeployment(client, kubednsDeployment); err != nil {
return err
}
kubednsService := &v1.Service{}
return createDNSService(kubednsService, serviceBytes, client)
return coreDNSAddon(cfg, client, replicas)
}
func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, replicas *int32) error {
@ -202,7 +101,7 @@ func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa
Replicas *int32
}{
DeploymentName: kubeadmconstants.CoreDNSDeploymentName,
Image: images.GetDNSImage(cfg, kubeadmconstants.CoreDNSImageName),
Image: images.GetDNSImage(cfg),
// TODO: https://github.com/kubernetes/kubeadm/issues/2200
OldControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleOldControlPlane,
ControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleControlPlane,
@ -212,28 +111,9 @@ func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa
return errors.Wrap(err, "error when parsing CoreDNS deployment template")
}
// Get the kube-dns ConfigMap for translation to equivalent CoreDNS Config.
kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
stubDomain, err := translateStubDomainOfKubeDNSToForwardCoreDNS(kubeDNSStubDomain, kubeDNSConfigMap)
if err != nil {
return err
}
upstreamNameserver, err := translateUpstreamNameServerOfKubeDNSToUpstreamForwardCoreDNS(kubeDNSUpstreamNameservers, kubeDNSConfigMap)
if err != nil {
return err
}
coreDNSDomain := cfg.Networking.DNSDomain
// Get the config file for CoreDNS
coreDNSConfigMapBytes, err := kubeadmutil.ParseTemplate(CoreDNSConfigMap, struct{ DNSDomain, UpstreamNameserver, StubDomain string }{
DNSDomain: coreDNSDomain,
UpstreamNameserver: upstreamNameserver,
StubDomain: stubDomain,
DNSDomain: cfg.Networking.DNSDomain,
})
if err != nil {
return errors.Wrap(err, "error when parsing CoreDNS configMap template")
@ -244,7 +124,7 @@ func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa
return err
}
coreDNSServiceBytes, err := kubeadmutil.ParseTemplate(KubeDNSService, struct{ DNSIP string }{
coreDNSServiceBytes, err := kubeadmutil.ParseTemplate(CoreDNSService, struct{ DNSIP string }{
DNSIP: dnsip.String(),
})
@ -473,116 +353,3 @@ func setCorefile(client clientset.Interface, coreDNSCorefileName string) error {
}
return nil
}
// translateStubDomainOfKubeDNSToForwardCoreDNS translates StubDomain Data in kube-dns ConfigMap
// in the form of Proxy for the CoreDNS Corefile.
func translateStubDomainOfKubeDNSToForwardCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
if kubeDNSConfigMap == nil {
return "", nil
}
if proxy, ok := kubeDNSConfigMap.Data[dataField]; ok {
stubDomainData := make(map[string][]string)
err := json.Unmarshal([]byte(proxy), &stubDomainData)
if err != nil {
return "", errors.Wrap(err, "failed to parse JSON from 'kube-dns ConfigMap")
}
var proxyStanza []interface{}
for domain, proxyHosts := range stubDomainData {
proxyIP, err := omitHostnameInTranslation(proxyHosts)
if err != nil {
return "", errors.Wrap(err, "invalid format to parse for proxy")
}
if len(proxyIP) == 0 {
continue
}
pStanza := map[string]interface{}{}
pStanza["keys"] = []string{domain + ":53"}
pStanza["body"] = [][]string{
{"errors"},
{"cache", "30"},
{"loop"},
append([]string{"forward", "."}, proxyIP...),
}
proxyStanza = append(proxyStanza, pStanza)
}
stanzasBytes, err := json.Marshal(proxyStanza)
if err != nil {
return "", err
}
corefileStanza, err := caddyfile.FromJSON(stanzasBytes)
if err != nil {
return "", err
}
return prepCorefileFormat(string(corefileStanza), 4), nil
}
return "", nil
}
// translateUpstreamNameServerOfKubeDNSToUpstreamForwardCoreDNS translates UpstreamNameServer Data in kube-dns ConfigMap
// in the form of Proxy for the CoreDNS Corefile.
func translateUpstreamNameServerOfKubeDNSToUpstreamForwardCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
if kubeDNSConfigMap == nil {
return "/etc/resolv.conf", nil
}
if upstreamValues, ok := kubeDNSConfigMap.Data[dataField]; ok {
var upstreamProxyValues []string
err := json.Unmarshal([]byte(upstreamValues), &upstreamProxyValues)
if err != nil {
return "", errors.Wrap(err, "failed to parse JSON from 'kube-dns ConfigMap")
}
upstreamProxyValues, err = omitHostnameInTranslation(upstreamProxyValues)
if err != nil {
return "", errors.Wrap(err, "invalid format to parse for proxy")
}
coreDNSProxyStanzaList := strings.Join(upstreamProxyValues, " ")
return coreDNSProxyStanzaList, nil
}
return "/etc/resolv.conf", nil
}
// prepCorefileFormat indents the output of the Corefile caddytext and replaces tabs with spaces
// to neatly format the configmap, making it readable.
func prepCorefileFormat(s string, indentation int) string {
var r []string
if s == "" {
return ""
}
for _, line := range strings.Split(s, "\n") {
indented := strings.Repeat(" ", indentation) + line
r = append(r, indented)
}
corefile := strings.Join(r, "\n")
return "\n" + strings.Replace(corefile, "\t", " ", -1)
}
// omitHostnameInTranslation checks if the data extracted from the kube-dns ConfigMap contains a valid
// IP address. Hostname to nameservers is not supported on CoreDNS and will
// skip that particular instance, if there is any hostname present.
func omitHostnameInTranslation(forwardIPs []string) ([]string, error) {
index := 0
for _, value := range forwardIPs {
proxyHost, _, err := kubeadmutil.ParseHostPort(value)
if err != nil {
return nil, err
}
parseIP := net.ParseIP(proxyHost)
if parseIP == nil {
klog.Warningf("your kube-dns configuration contains a hostname %v. It will be omitted in the translation to CoreDNS as hostnames are unsupported", proxyHost)
} else {
forwardIPs[index] = value
index++
}
}
forwardIPs = forwardIPs[:index]
return forwardIPs, nil
}
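Because this file's hunks interleave the deleted kubeDNSAddon path with the surviving CoreDNS path, it may help to see the post-patch EnsureDNSAddon reassembled from the added lines above (a reconstruction of the diff, not additional code):

// EnsureDNSAddon creates the CoreDNS addon
func EnsureDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
	replicas, err := deployedDNSReplicas(client, coreDNSReplicas)
	if err != nil {
		return err
	}
	return coreDNSAddon(cfg, client, replicas)
}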

View File

@ -23,74 +23,14 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientsetfake "k8s.io/client-go/kubernetes/fake"
clientsetscheme "k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
func TestCreateServiceAccount(t *testing.T) {
tests := []struct {
name string
createErr error
expectErr bool
}{
{
"error-free case",
nil,
false,
},
{
"duplication errors should be ignored",
apierrors.NewAlreadyExists(schema.GroupResource{}, ""),
false,
},
{
"unexpected errors should be returned",
apierrors.NewUnauthorized(""),
true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
if tc.createErr != nil {
client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.createErr
})
}
err := CreateServiceAccount(client)
if tc.expectErr {
if err == nil {
t.Errorf("CreateServiceAccounts(%s) wanted err, got nil", tc.name)
}
return
} else if !tc.expectErr && err != nil {
t.Errorf("CreateServiceAccounts(%s) returned unexpected err: %v", tc.name, err)
}
wantResourcesCreated := 1
if len(client.Actions()) != wantResourcesCreated {
t.Errorf("CreateServiceAccounts(%s) should have made %d actions, but made %d", tc.name, wantResourcesCreated, len(client.Actions()))
}
for _, action := range client.Actions() {
if action.GetVerb() != "create" || action.GetResource().Resource != "serviceaccounts" {
t.Errorf("CreateServiceAccounts(%s) called [%v %v], but wanted [create serviceaccounts]",
tc.name, action.GetVerb(), action.GetResource().Resource)
}
}
})
}
}
func TestCompileManifests(t *testing.T) {
replicas := int32(coreDNSReplicas)
var tests = []struct {
@ -98,32 +38,6 @@ func TestCompileManifests(t *testing.T) {
manifest string
data interface{}
}{
{
name: "KubeDNSDeployment manifest",
manifest: KubeDNSDeployment,
data: struct {
DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, OldControlPlaneTaintKey, ControlPlaneTaintKey string
Replicas *int32
}{
DeploymentName: "foo",
KubeDNSImage: "foo",
DNSMasqImage: "foo",
SidecarImage: "foo",
DNSBindAddr: "foo",
DNSProbeAddr: "foo",
DNSDomain: "foo",
OldControlPlaneTaintKey: "foo",
ControlPlaneTaintKey: "foo",
Replicas: &replicas,
},
},
{
name: "KubeDNSService manifest",
manifest: KubeDNSService,
data: struct{ DNSIP string }{
DNSIP: "foo",
},
},
{
name: "CoreDNSDeployment manifest",
manifest: CoreDNSDeployment,
@ -141,10 +55,8 @@ func TestCompileManifests(t *testing.T) {
{
name: "CoreDNSConfigMap manifest",
manifest: CoreDNSConfigMap,
data: struct{ DNSDomain, UpstreamNameserver, StubDomain string }{
DNSDomain: "foo",
UpstreamNameserver: "foo",
StubDomain: "foo",
data: struct{ DNSDomain string }{
DNSDomain: "foo",
},
},
}
@ -207,296 +119,6 @@ func TestGetDNSIP(t *testing.T) {
}
}
func TestTranslateStubDomainKubeDNSToCoreDNS(t *testing.T) {
testCases := []struct {
name string
configMap *v1.ConfigMap
expectOne string
expectTwo string
}{
{
name: "valid call with multiple IPs",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"stubDomains": `{"foo.com" : ["1.2.3.4:5300","3.3.3.3"], "my.cluster.local" : ["2.3.4.5"]}`,
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expectOne: `
foo.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300 3.3.3.3
}
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}`,
expectTwo: `
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}
foo.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300 3.3.3.3
}`,
},
{
name: "empty call",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kubedns",
Namespace: "kube-system",
},
},
expectOne: "",
},
{
name: "valid call",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"stubDomains": `{"foo.com" : ["1.2.3.4:5300"], "my.cluster.local" : ["2.3.4.5"]}`,
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expectOne: `
foo.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300
}
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}`,
expectTwo: `
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}
foo.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300
}`,
},
{
name: "If Hostname present: Omit Hostname",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"stubDomains": `{"bar.com" : ["1.2.3.4:5300","service.consul"], "my.cluster.local" : ["2.3.4.5"], "foo.com" : ["service.consul"]}`,
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expectOne: `
bar.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300
}
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}`,
expectTwo: `
my.cluster.local:53 {
errors
cache 30
loop
forward . 2.3.4.5
}
bar.com:53 {
errors
cache 30
loop
forward . 1.2.3.4:5300
}`,
},
{
name: "All hostname: return empty",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"stubDomains": `{"foo.com" : ["service.consul"], "my.cluster.local" : ["ns.foo.com"]}`,
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expectOne: "",
expectTwo: "",
},
{
name: "missing stubDomains",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expectOne: "",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
out, err := translateStubDomainOfKubeDNSToForwardCoreDNS(kubeDNSStubDomain, testCase.configMap)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !strings.EqualFold(out, testCase.expectOne) && !strings.EqualFold(out, testCase.expectTwo) {
t.Errorf("expected to find %q or %q in output: %q", testCase.expectOne, testCase.expectTwo, out)
}
})
}
}
func TestTranslateUpstreamKubeDNSToCoreDNS(t *testing.T) {
testCases := []struct {
name string
configMap *v1.ConfigMap
expect string
}{
{
name: "expect resolv.conf",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
},
expect: "/etc/resolv.conf",
},
{
name: "expect list of Name Server IP addresses",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kubedns",
Namespace: "kube-system",
},
Data: map[string]string{
"stubDomains": ` {"foo.com" : ["1.2.3.4:5300"], "my.cluster.local" : ["2.3.4.5"]}`,
"upstreamNameservers": `["8.8.8.8", "8.8.4.4", "4.4.4.4"]`,
},
},
expect: "8.8.8.8 8.8.4.4 4.4.4.4",
},
{
name: "no stubDomains: expect list of Name Server IP addresses",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kubedns",
Namespace: "kube-system",
},
Data: map[string]string{
"upstreamNameservers": `["8.8.8.8", "8.8.4.4"]`,
},
},
expect: "8.8.8.8 8.8.4.4",
},
{
name: "Hostname present: expect NameServer to omit the hostname",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kubedns",
Namespace: "kube-system",
},
Data: map[string]string{
"upstreamNameservers": `["service.consul", "ns.foo.com", "8.8.4.4", "ns.moo.com", "ns.bar.com"]`,
},
},
expect: "8.8.4.4",
},
{
name: "All hostnames: return empty",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"upstreamNameservers": `["service.consul", "ns.foo.com"]`,
},
},
expect: "",
},
{
name: "IPv6: expect list of Name Server IP addresses",
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kubedns",
Namespace: "kube-system",
},
Data: map[string]string{
"upstreamNameservers": `["[2003::1]:53", "8.8.4.4"]`,
},
},
expect: "[2003::1]:53 8.8.4.4",
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
out, err := translateUpstreamNameServerOfKubeDNSToUpstreamForwardCoreDNS(kubeDNSUpstreamNameservers, testCase.configMap)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !strings.EqualFold(out, testCase.expect) {
t.Errorf("expected to find %q in output: %q", testCase.expect, out)
}
})
}
}
func TestDeploymentsHaveSystemClusterCriticalPriorityClassName(t *testing.T) {
replicas := int32(coreDNSReplicas)
testCases := []struct {
@ -504,25 +126,6 @@ func TestDeploymentsHaveSystemClusterCriticalPriorityClassName(t *testing.T) {
manifest string
data interface{}
}{
{
name: "KubeDNSDeployment",
manifest: KubeDNSDeployment,
data: struct {
DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, OldControlPlaneTaintKey, ControlPlaneTaintKey string
Replicas *int32
}{
DeploymentName: "foo",
KubeDNSImage: "foo",
DNSMasqImage: "foo",
SidecarImage: "foo",
DNSBindAddr: "foo",
DNSProbeAddr: "foo",
DNSDomain: "foo",
OldControlPlaneTaintKey: "foo",
ControlPlaneTaintKey: "foo",
Replicas: &replicas,
},
},
{
name: "CoreDNSDeployment",
manifest: CoreDNSDeployment,

View File

@ -17,174 +17,15 @@ limitations under the License.
package dns
const (
// KubeDNSDeployment is the kube-dns Deployment manifest for the kube-dns manifest for v1.7+
KubeDNSDeployment = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .DeploymentName }}
namespace: kube-system
labels:
k8s-app: kube-dns
spec:
replicas: {{ .Replicas }}
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{ .KubeDNSImage }}
imagePullPolicy: IfNotPresent
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes control-plane service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{ .DNSDomain }}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{ .DNSMasqImage }}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/{{ .DNSDomain }}/{{ .DNSBindAddr }}#10053
- --server=/in-addr.arpa/{{ .DNSBindAddr }}#10053
- --server=/ip6.arpa/{{ .DNSBindAddr }}#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{ .SidecarImage }}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,SRV
- --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,SRV
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: {{ .OldControlPlaneTaintKey }}
effect: NoSchedule
- key: {{ .ControlPlaneTaintKey }}
effect: NoSchedule
`
// KubeDNSService is the kube-dns Service manifest
KubeDNSService = `
// CoreDNSService is the CoreDNS Service manifest
CoreDNSService = `
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
kubernetes.io/name: "CoreDNS"
name: kube-dns
namespace: kube-system
annotations:
@ -324,14 +165,14 @@ data:
ttl 30
}
prometheus :9153
forward . {{ .UpstreamNameserver }} {
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}{{ .StubDomain }}
}
`
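With the stub-domain and upstream-nameserver translation removed, the CoreDNS ConfigMap template above takes only DNSDomain and always forwards to /etc/resolv.conf. A small sketch of rendering such a template with text/template (a trimmed, hypothetical Corefile fragment; kubeadm's ParseTemplate helper does the equivalent):

package main

import (
	"os"
	"text/template"
)

const corefileFragment = `.:53 {
    kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
        fallthrough in-addr.arpa ip6.arpa
    }
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
}
`

func main() {
	tmpl := template.Must(template.New("corefile").Parse(corefileFragment))
	// Only DNSDomain is needed now; no UpstreamNameserver or StubDomain fields.
	data := struct{ DNSDomain string }{DNSDomain: "cluster.local"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}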
// CoreDNSClusterRole is the CoreDNS ClusterRole manifest
CoreDNSClusterRole = `

View File

@ -89,7 +89,6 @@ func getEtcdVersion(v *versionutil.Version) string {
}
const fakeCurrentCoreDNSVersion = "1.0.6"
const fakeCurrentKubeDNSVersion = "1.14.7"
func TestGetAvailableUpgrades(t *testing.T) {
@ -666,78 +665,6 @@ func TestGetAvailableUpgrades(t *testing.T) {
},
},
},
{
name: "kubedns to coredns",
vg: &fakeVersionGetter{
clusterVersion: v1Y2.String(),
kubeletVersion: v1Y2.String(), // the kubelet are on the same version as the control plane
kubeadmVersion: v1Z0.String(),
stablePatchVersion: v1Z0.String(),
stableVersion: v1Z0.String(),
},
beforeDNSType: kubeadmapi.KubeDNS,
beforeDNSVersion: fakeCurrentKubeDNSVersion,
dnsType: kubeadmapi.CoreDNS,
expectedUpgrades: []Upgrade{
{
Description: fmt.Sprintf("version in the v%d.%d series", v1Y0.Major(), v1Y0.Minor()),
Before: ClusterState{
KubeVersion: v1Y2.String(),
KubeletVersions: map[string]uint16{
v1Y2.String(): 1,
},
KubeadmVersion: v1Z0.String(),
DNSType: kubeadmapi.KubeDNS,
DNSVersion: fakeCurrentKubeDNSVersion,
EtcdVersion: fakeCurrentEtcdVersion,
},
After: ClusterState{
KubeVersion: v1Z0.String(),
KubeadmVersion: v1Z0.String(),
DNSType: kubeadmapi.CoreDNS,
DNSVersion: constants.CoreDNSVersion,
EtcdVersion: getEtcdVersion(v1Z0),
},
},
},
},
{
name: "keep coredns",
vg: &fakeVersionGetter{
clusterVersion: v1Y2.String(),
kubeletVersion: v1Y2.String(), // the kubelet are on the same version as the control plane
kubeadmVersion: v1Z0.String(),
stablePatchVersion: v1Z0.String(),
stableVersion: v1Z0.String(),
},
beforeDNSType: kubeadmapi.KubeDNS,
beforeDNSVersion: fakeCurrentKubeDNSVersion,
dnsType: kubeadmapi.KubeDNS,
expectedUpgrades: []Upgrade{
{
Description: fmt.Sprintf("version in the v%d.%d series", v1Y0.Major(), v1Y0.Minor()),
Before: ClusterState{
KubeVersion: v1Y2.String(),
KubeletVersions: map[string]uint16{
v1Y2.String(): 1,
},
KubeadmVersion: v1Z0.String(),
DNSType: kubeadmapi.KubeDNS,
DNSVersion: fakeCurrentKubeDNSVersion,
EtcdVersion: fakeCurrentEtcdVersion,
},
After: ClusterState{
KubeVersion: v1Z0.String(),
KubeadmVersion: v1Z0.String(),
DNSType: kubeadmapi.KubeDNS,
DNSVersion: constants.KubeDNSVersion,
EtcdVersion: getEtcdVersion(v1Z0),
},
},
},
},
}
// Instantiating a fake etcd cluster for being able to get etcd version for a corresponding
@ -746,9 +673,6 @@ func TestGetAvailableUpgrades(t *testing.T) {
t.Run(rt.name, func(t *testing.T) {
dnsName := constants.CoreDNSDeploymentName
if rt.beforeDNSType == kubeadmapi.KubeDNS {
dnsName = constants.KubeDNSDeploymentName
}
client := clientsetfake.NewSimpleClientset(&apps.Deployment{
TypeMeta: metav1.TypeMeta{

View File

@ -25,7 +25,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
@ -102,12 +101,12 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
errs = append(errs, err)
}
// If the coredns / kube-dns ConfigMaps are missing, show a warning and assume that the
// If the coredns ConfigMap is missing, show a warning and assume that the
// DNS addon was skipped during "kubeadm init", and that its redeployment on upgrade is not desired.
//
// TODO: remove this once "kubeadm upgrade apply" phases are supported:
// https://github.com/kubernetes/kubeadm/issues/1318
var missingCoreDNSConfigMap, missingKubeDNSConfigMap bool
var missingCoreDNSConfigMap bool
if _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(
context.TODO(),
kubeadmconstants.CoreDNSConfigMap,
@ -115,30 +114,18 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
); err != nil && apierrors.IsNotFound(err) {
missingCoreDNSConfigMap = true
}
if _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(
context.TODO(),
kubeadmconstants.KubeDNSConfigMap,
metav1.GetOptions{},
); err != nil && apierrors.IsNotFound(err) {
missingKubeDNSConfigMap = true
}
if missingCoreDNSConfigMap && missingKubeDNSConfigMap {
klog.Warningf("the ConfigMaps %q/%q in the namespace %q were not found. "+
if missingCoreDNSConfigMap {
klog.Warningf("the ConfigMaps %q in the namespace %q were not found. "+
"Assuming that a DNS server was not deployed for this cluster. "+
"Note that once 'kubeadm upgrade apply' supports phases you "+
"will have to skip the DNS upgrade manually",
kubeadmconstants.CoreDNSConfigMap,
kubeadmconstants.KubeDNSConfigMap,
metav1.NamespaceSystem)
} else {
// Upgrade CoreDNS/kube-dns
// Upgrade CoreDNS
if err := dns.EnsureDNSAddon(&cfg.ClusterConfiguration, client); err != nil {
errs = append(errs, err)
}
// Remove the old DNS deployment if a new DNS service is now used (kube-dns to CoreDNS or vice versa)
if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(&cfg.ClusterConfiguration, client, dryRun); err != nil {
errs = append(errs, err)
}
}
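The post-upgrade hunks above reduce the DNS handling to a single question: is the CoreDNS ConfigMap present? A minimal sketch of that check using client-go's fake clientset (hypothetical helper name; the real code uses kubeadmconstants.CoreDNSConfigMap, i.e. "coredns"):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
)

// coreDNSConfigMapMissing reports whether the "coredns" ConfigMap is absent from
// kube-system, which is the condition under which the upgrade skips redeploying DNS.
func coreDNSConfigMapMissing(ctx context.Context, client clientset.Interface) (bool, error) {
	_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, "coredns", metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return true, nil
	}
	return false, err
}

func main() {
	client := clientsetfake.NewSimpleClientset() // empty fake cluster: no coredns ConfigMap
	missing, err := coreDNSConfigMapMissing(context.TODO(), client)
	fmt.Println(missing, err) // true <nil> -> warn and skip the DNS upgrade
}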
// If the kube-proxy ConfigMap is missing, show a warning and assume that kube-proxy
@ -167,44 +154,6 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
return errorsutil.NewAggregate(errs)
}
func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, dryRun bool) error {
return apiclient.TryRunCommand(func() error {
installedDeploymentName := kubeadmconstants.KubeDNSDeploymentName
deploymentToDelete := kubeadmconstants.CoreDNSDeploymentName
if cfg.DNS.Type == kubeadmapi.CoreDNS {
installedDeploymentName = kubeadmconstants.CoreDNSDeploymentName
deploymentToDelete = kubeadmconstants.KubeDNSDeploymentName
}
nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
})
if err != nil {
return err
}
// If we're dry-running or there are no schedulable nodes available, we don't need to wait for the new DNS addon to become ready
if !dryRun && len(nodes.Items) != 0 {
dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{})
if err != nil {
return err
}
if dnsDeployment.Status.ReadyReplicas == 0 {
return errors.New("the DNS deployment isn't ready yet")
}
}
// We don't want to wait for the DNS deployment above to become ready when dryrunning (as it never will)
// but here we should execute the DELETE command against the dryrun clientset, as it will only be logged
err = apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, deploymentToDelete)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}, 10)
}
func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, dryRun bool) error {
kubeletDir, err := GetKubeletDir(dryRun)
if err != nil {

View File

@ -17,7 +17,6 @@ limitations under the License.
package upgrade
import (
"context"
"fmt"
"os"
"strings"
@ -25,8 +24,6 @@ import (
"github.com/coredns/corefile-migration/migration"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@ -72,11 +69,6 @@ func RunCoreDNSMigrationCheck(client clientset.Interface, ignorePreflightErrors
client: client,
f: checkMigration,
},
&CoreDNSCheck{
name: "kubeDNSTranslation",
client: client,
f: checkKubeDNSConfigMap,
},
}
return preflight.RunChecks(migrationChecks, os.Stderr, ignorePreflightErrors)
@ -126,21 +118,3 @@ func checkMigration(client clientset.Interface) error {
}
return nil
}
// checkKubeDNSConfigMap checks if the translation of kube-dns to CoreDNS ConfigMap is supported
func checkKubeDNSConfigMap(client clientset.Interface) error {
kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return err
}
if _, ok := kubeDNSConfigMap.Data["federations"]; ok {
klog.V(1).Infoln("CoreDNS no longer supports Federation and " +
"hence will not translate the federation data from kube-dns to CoreDNS ConfigMap")
return errors.New("kube-dns Federation data will not be translated")
}
return nil
}

View File

@ -80,7 +80,7 @@ func (idr *InitDryRunGetter) HandleListAction(action core.ListAction) (bool, run
}
// handleKubernetesService returns a faked Kubernetes service in order to be able to continue running kubeadm init.
// The kube-dns addon code GETs the Kubernetes service in order to extract the service subnet
// The CoreDNS addon code GETs the Kubernetes service in order to extract the service subnet
func (idr *InitDryRunGetter) handleKubernetesService(action core.GetAction) (bool, runtime.Object, error) {
if action.GetName() != "kubernetes" || action.GetNamespace() != metav1.NamespaceDefault || action.GetResource().Resource != "services" {
// We can't handle this event
@ -97,7 +97,7 @@ func (idr *InitDryRunGetter) handleKubernetesService(action core.GetAction) (boo
return true, nil, errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", svcSubnet.String())
}
// The only used field of this Service object is the ClusterIP, which kube-dns uses to calculate its own IP
// The only used field of this Service object is the ClusterIP, which CoreDNS uses to calculate its own IP
return true, &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "kubernetes",