Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-10-21 14:38:46 +00:00)
kubeadm cleanup: master -> control-plane (cont.2)
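In short, the call sites below move off the master-flavoured kubeadm constants and onto their control-plane (or system-privileged) counterparts. For orientation, here is a rough Go sketch of the constants involved; the names come from the hunks below, but the literal values are assumptions inferred from the test fixtures, not verbatim kubeadm source.

// Illustrative sketch only: approximate shape of the kubeadm constants this
// commit switches callers to. Values are assumptions, not copied source.
package constants

import v1 "k8s.io/api/core/v1"

const (
	// SystemPrivilegedGroup replaces MastersGroup as the client-cert and
	// kubeconfig organization (assumed to be the "system:masters" group).
	SystemPrivilegedGroup = "system:masters"

	// The node label is still keyed on "master" at this point of the rename,
	// as the expected JSON patches in TestMarkControlPlane show.
	LabelNodeRoleMaster = "node-role.kubernetes.io/master"
)

var (
	// ControlPlaneTaint replaces MasterTaint at call sites; the test patches
	// below still expect the master key with a NoSchedule effect.
	ControlPlaneTaint = v1.Taint{
		Key:    LabelNodeRoleMaster,
		Effect: v1.TaintEffectNoSchedule,
	}

	// ControlPlaneToleration replaces MasterToleration on self-hosted PodSpecs.
	ControlPlaneToleration = v1.Toleration{
		Key:    LabelNodeRoleMaster,
		Effect: v1.TaintEffectNoSchedule,
	}

	// ControlPlaneComponents replaces MasterComponents in the loops below
	// (assumed list of the three static control-plane components).
	ControlPlaneComponents = []string{"kube-apiserver", "kube-controller-manager", "kube-scheduler"}
)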
@@ -265,7 +265,7 @@ var (
 		CAName: "ca",
 		config: certutil.Config{
 			CommonName: kubeadmconstants.APIServerKubeletClientCertCommonName,
-			Organization: []string{kubeadmconstants.MastersGroup},
+			Organization: []string{kubeadmconstants.SystemPrivilegedGroup},
 			Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
 		},
 	}
@@ -341,7 +341,7 @@ var (
 		CAName: "etcd-ca",
 		config: certutil.Config{
 			CommonName: kubeadmconstants.EtcdHealthcheckClientCertCommonName,
-			Organization: []string{kubeadmconstants.MastersGroup},
+			Organization: []string{kubeadmconstants.SystemPrivilegedGroup},
 			Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
 		},
 	}
@@ -353,7 +353,7 @@ var (
 		CAName: "etcd-ca",
 		config: certutil.Config{
 			CommonName: kubeadmconstants.APIServerEtcdClientCertCommonName,
-			Organization: []string{kubeadmconstants.MastersGroup},
+			Organization: []string{kubeadmconstants.SystemPrivilegedGroup},
 			Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
 		},
 	}

@@ -147,7 +147,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
 			ClientName: "kubernetes-admin",
 			ClientCertAuth: &clientCertAuth{
 				CAKey: caKey,
-				Organizations: []string{kubeadmconstants.MastersGroup},
+				Organizations: []string{kubeadmconstants.SystemPrivilegedGroup},
 			},
 		},
 		kubeadmconstants.KubeletKubeConfigFileName: {

@@ -118,7 +118,7 @@ func TestGetKubeConfigSpecs(t *testing.T) {
 		{
 			kubeConfigFile: kubeadmconstants.AdminKubeConfigFileName,
 			clientName: "kubernetes-admin",
-			organizations: []string{kubeadmconstants.MastersGroup},
+			organizations: []string{kubeadmconstants.SystemPrivilegedGroup},
 		},
 		{
 			kubeConfigFile: kubeadmconstants.KubeletKubeConfigFileName,

@@ -49,7 +49,7 @@ func TestMarkControlPlane(t *testing.T) {
 			"control-plane label and taint missing",
 			"",
 			nil,
-			[]v1.Taint{kubeadmconstants.MasterTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
 			"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
 		},
 		{
@@ -62,22 +62,22 @@ func TestMarkControlPlane(t *testing.T) {
 		{
 			"control-plane label missing",
 			"",
-			[]v1.Taint{kubeadmconstants.MasterTaint},
-			[]v1.Taint{kubeadmconstants.MasterTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
 			"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
 		},
 		{
 			"control-plane taint missing",
 			kubeadmconstants.LabelNodeRoleMaster,
 			nil,
-			[]v1.Taint{kubeadmconstants.MasterTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
 			"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
 		},
 		{
 			"nothing missing",
 			kubeadmconstants.LabelNodeRoleMaster,
-			[]v1.Taint{kubeadmconstants.MasterTaint},
-			[]v1.Taint{kubeadmconstants.MasterTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
 			"{}",
 		},
 		{
@@ -101,7 +101,7 @@ func TestMarkControlPlane(t *testing.T) {
 					Effect: v1.TaintEffectNoSchedule,
 				},
 			},
-			[]v1.Taint{kubeadmconstants.MasterTaint},
+			[]v1.Taint{kubeadmconstants.ControlPlaneTaint},
 			"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"},{\"effect\":\"NoSchedule\",\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\"}]}}",
 		},
 	}

@@ -95,11 +95,11 @@ func addNodeSelectorToPodSpec(podSpec *v1.PodSpec) {
 // setMasterTolerationOnPodSpec makes the Pod tolerate the master taint
 func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) {
 	if podSpec.Tolerations == nil {
-		podSpec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration}
+		podSpec.Tolerations = []v1.Toleration{kubeadmconstants.ControlPlaneToleration}
 		return
 	}
 
-	podSpec.Tolerations = append(podSpec.Tolerations, kubeadmconstants.MasterToleration)
+	podSpec.Tolerations = append(podSpec.Tolerations, kubeadmconstants.ControlPlaneToleration)
 }
 
 // setHostIPOnPodSpec sets the environment variable HOST_IP using downward API

@@ -69,7 +69,7 @@ func TestMutatePodSpec(t *testing.T) {
 					kubeadmconstants.LabelNodeRoleMaster: "",
 				},
 				Tolerations: []v1.Toleration{
-					kubeadmconstants.MasterToleration,
+					kubeadmconstants.ControlPlaneToleration,
 				},
 				DNSPolicy: v1.DNSClusterFirstWithHostNet,
 			},
@@ -83,7 +83,7 @@ func TestMutatePodSpec(t *testing.T) {
 					kubeadmconstants.LabelNodeRoleMaster: "",
 				},
 				Tolerations: []v1.Toleration{
-					kubeadmconstants.MasterToleration,
+					kubeadmconstants.ControlPlaneToleration,
 				},
 				DNSPolicy: v1.DNSClusterFirstWithHostNet,
 			},
@@ -97,7 +97,7 @@ func TestMutatePodSpec(t *testing.T) {
 					kubeadmconstants.LabelNodeRoleMaster: "",
 				},
 				Tolerations: []v1.Toleration{
-					kubeadmconstants.MasterToleration,
+					kubeadmconstants.ControlPlaneToleration,
 				},
 				DNSPolicy: v1.DNSClusterFirstWithHostNet,
 			},
@@ -168,7 +168,7 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
 			podSpec: &v1.PodSpec{},
 			expected: v1.PodSpec{
 				Tolerations: []v1.Toleration{
-					kubeadmconstants.MasterToleration,
+					kubeadmconstants.ControlPlaneToleration,
 				},
 			},
 		},
@@ -182,7 +182,7 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
 			expected: v1.PodSpec{
 				Tolerations: []v1.Toleration{
 					{Key: "foo", Value: "bar"},
-					kubeadmconstants.MasterToleration,
+					kubeadmconstants.ControlPlaneToleration,
 				},
 			},
 		},

@@ -75,7 +75,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
 		}
 	}
 
-	for _, componentName := range kubeadmconstants.MasterComponents {
+	for _, componentName := range kubeadmconstants.ControlPlaneComponents {
 		start := time.Now()
 		manifestPath := kubeadmconstants.GetStaticPodFilepath(componentName, manifestsDir)
 

@@ -126,7 +126,7 @@ func masterNodesReady(client clientset.Interface) error {
 // staticPodManifestHealth makes sure the required static pods are presents
 func staticPodManifestHealth(_ clientset.Interface) error {
 	nonExistentManifests := []string{}
-	for _, component := range constants.MasterComponents {
+	for _, component := range constants.ControlPlaneComponents {
 		manifestFile := constants.GetStaticPodFilepath(component, constants.GetStaticPodDirectory())
 		if _, err := os.Stat(manifestFile); os.IsNotExist(err) {
 			nonExistentManifests = append(nonExistentManifests, manifestFile)
@@ -152,7 +152,7 @@ func IsControlPlaneSelfHosted(client clientset.Interface) bool {
 // getNotReadyDaemonSets gets the amount of Ready control plane DaemonSets
 func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) {
 	notReadyDaemonSets := []error{}
-	for _, component := range constants.MasterComponents {
+	for _, component := range constants.ControlPlaneComponents {
 		dsName := constants.AddSelfHostedPrefix(component)
 		ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
 		if err != nil {

@@ -42,7 +42,7 @@ type Prepuller interface {
 	DeleteFunc(string) error
 }
 
-// DaemonSetPrepuller makes sure the control plane images are available on all masters
+// DaemonSetPrepuller makes sure the control-plane images are available on all masters
 type DaemonSetPrepuller struct {
 	client clientset.Interface
 	cfg *kubeadmapi.ClusterConfiguration
@@ -181,7 +181,7 @@ func buildPrePullDaemonSet(component, image string) *apps.DaemonSet {
 					NodeSelector: map[string]string{
 						constants.LabelNodeRoleMaster: "",
 					},
-					Tolerations: []v1.Toleration{constants.MasterToleration},
+					Tolerations: []v1.Toleration{constants.ControlPlaneToleration},
 					TerminationGracePeriodSeconds: &gracePeriodSecs,
 				},
 			},

@@ -141,7 +141,7 @@ func TestPrepullImagesInParallel(t *testing.T) {
 
 	for _, rt := range tests {
 		t.Run(rt.name, func(t *testing.T) {
-			actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.MasterComponents, constants.Etcd))
+			actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.ControlPlaneComponents, constants.Etcd))
 			if (actualErr != nil) != rt.expectedErr {
 				t.Errorf(
 					"failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",

@@ -436,7 +436,7 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter,
 		return errors.Wrap(err, "error creating init static pod manifest files")
 	}
 
-	for _, component := range constants.MasterComponents {
+	for _, component := range constants.ControlPlaneComponents {
 		if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests); err != nil {
 			return err
 		}
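To close, a minimal, self-contained usage sketch of the append-or-initialize toleration logic that the selfhosting hunk keeps after the rename. The function name and the toleration literal here are stand-ins for kubeadm's setMasterTolerationOnPodSpec and kubeadmconstants.ControlPlaneToleration (assumed values), not the actual identifiers.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// controlPlaneToleration stands in for kubeadmconstants.ControlPlaneToleration
// (assumed key/effect, matching the taint expected by the tests above).
var controlPlaneToleration = v1.Toleration{
	Key:    "node-role.kubernetes.io/master",
	Effect: v1.TaintEffectNoSchedule,
}

// setControlPlaneTolerationOnPodSpec mirrors the diff's logic: start a new
// slice when none exists, otherwise append so existing tolerations survive.
func setControlPlaneTolerationOnPodSpec(podSpec *v1.PodSpec) {
	if podSpec.Tolerations == nil {
		podSpec.Tolerations = []v1.Toleration{controlPlaneToleration}
		return
	}
	podSpec.Tolerations = append(podSpec.Tolerations, controlPlaneToleration)
}

func main() {
	// Empty spec: the toleration slice is created with one entry.
	empty := &v1.PodSpec{}
	setControlPlaneTolerationOnPodSpec(empty)

	// Pre-populated spec: the existing toleration is preserved, matching the
	// {Key: "foo", Value: "bar"} case in TestSetMasterTolerationOnPodSpec.
	existing := &v1.PodSpec{Tolerations: []v1.Toleration{{Key: "foo", Value: "bar"}}}
	setControlPlaneTolerationOnPodSpec(existing)

	fmt.Println(len(empty.Tolerations), len(existing.Tolerations)) // prints: 1 2
}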