Merge pull request #107584 from SataQiu/clean-kubeadm-20220115
kubeadm: make the phase prefix and capitalization consistent
Commit: b6c06a95d7
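Every hunk below applies the same two-part convention to kubeadm's console output: the message prefix becomes the name of the phase that prints it (for example "[etcd]", "[kubelet-start]", "[control-plane-join]") instead of the generic "[dryrun]" prefix, and the first word after the prefix is capitalized. The following standalone Go sketch only illustrates the before/after style; the program and its dataDir value are illustrative, not part of the patch:

package main

import "fmt"

func main() {
	// Illustrative value only; not taken from the patch.
	dataDir := "/var/lib/etcd"

	// Before: generic "[dryrun]" prefix (and, at some call sites, a lowercase first word).
	fmt.Printf("[dryrun] Would ensure that %q directory is present\n", dataDir)

	// After: the owning phase name as the prefix, message starting with a capital letter.
	fmt.Printf("[etcd] Would ensure that %q directory is present\n", dataDir)
}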
@@ -93,7 +93,7 @@ func runEtcdPhaseLocal() func(c workflow.RunData) error {
 				return err
 			}
 		} else {
-			fmt.Printf("[dryrun] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir)
+			fmt.Printf("[etcd] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir)
 		}
 		fmt.Printf("[etcd] Creating static Pod manifest for local etcd in %q\n", data.ManifestDir())
 		if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(data.ManifestDir(), data.PatchesDir(), cfg.NodeRegistration.Name, &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, data.DryRun()); err != nil {

@@ -126,7 +126,7 @@ func runEtcdPhase(c workflow.RunData) error {
 	}
 	// in case of local etcd
 	if cfg.Etcd.External != nil {
-		fmt.Println("[control-plane-join] using external etcd - no local stacked instance added")
+		fmt.Println("[control-plane-join] Using external etcd - no local stacked instance added")
 		return nil
 	}
 
@@ -136,7 +136,7 @@ func runEtcdPhase(c workflow.RunData) error {
 			return err
 		}
 	} else {
-		fmt.Printf("[dryrun] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir)
+		fmt.Printf("[control-plane-join] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir)
 	}
 
 	// Adds a new etcd instance; in order to do this the new etcd instance should be "announced" to

@@ -194,7 +194,7 @@ func runMarkControlPlanePhase(c workflow.RunData) error {
 			return errors.Wrap(err, "error applying control-plane label and taints")
 		}
 	} else {
-		fmt.Printf("[dryrun] Would mark node %s as a control-plane\n", cfg.NodeRegistration.Name)
+		fmt.Printf("[control-plane-join] Would mark node %s as a control-plane\n", cfg.NodeRegistration.Name)
 	}
 
 	return nil

@@ -170,7 +170,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
 		klog.V(1).Infoln("[kubelet-start] Stopping the kubelet")
 		kubeletphase.TryStopKubelet()
 	} else {
-		fmt.Println("[dryrun] Would stop the kubelet")
+		fmt.Println("[kubelet-start] Would stop the kubelet")
 	}
 
 	// Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start

@@ -187,7 +187,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
 	}
 
 	if data.DryRun() {
-		fmt.Println("[dryrun] Would start the kubelet")
+		fmt.Println("[kubelet-start] Would start the kubelet")
 		// If we're dry-running, print the kubelet config manifests and print static pod manifests if joining a control plane.
 		// TODO: think of a better place to move this call - e.g. a hidden phase.
 		if err := dryrunutil.PrintFilesIfDryRunning(cfg.ControlPlane != nil, data.ManifestDir(), data.OutputWriter()); err != nil {

@@ -118,7 +118,7 @@ func runKubeletConfigPhase() func(c workflow.RunData) error {
 				return errors.Wrapf(err, "error updating the CRI socket for Node %q", nro.Name)
 			}
 		} else {
-			fmt.Println("[dryrun] would update the node CRI socket path to include an URL scheme")
+			fmt.Println("[upgrade] Would update the node CRI socket path to include an URL scheme")
 		}
 	}
 
@@ -170,7 +170,7 @@ func runApply(flags *applyFlags, args []string) error {
 	}
 
 	if flags.dryRun {
-		fmt.Println("[dryrun] Finished dryrunning successfully!")
+		fmt.Println("[upgrade/successful] Finished dryrunning successfully!")
 		return nil
 	}
 
@@ -50,7 +50,7 @@ const (
 
 // AllowBootstrapTokensToPostCSRs creates RBAC rules in a way the makes Node Bootstrap Tokens able to post CSRs
 func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
-	fmt.Println("[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials")
+	fmt.Println("[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials")
 
 	return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
 		ObjectMeta: metav1.ObjectMeta{

@@ -72,7 +72,7 @@ func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
 
 // AllowBoostrapTokensToGetNodes creates RBAC rules to allow Node Bootstrap Tokens to list nodes
 func AllowBoostrapTokensToGetNodes(client clientset.Interface) error {
-	fmt.Println("[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes")
+	fmt.Println("[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes")
 
 	if err := apiclient.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{
 		ObjectMeta: metav1.ObjectMeta{

@@ -111,7 +111,7 @@ func AllowBoostrapTokensToGetNodes(client clientset.Interface) error {
 
 // AutoApproveNodeBootstrapTokens creates RBAC rules in a way that makes Node Bootstrap Tokens' CSR auto-approved by the csrapprover controller
 func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {
-	fmt.Println("[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token")
+	fmt.Println("[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token")
 
 	// Always create this kubeadm-specific binding though
 	return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{

@@ -134,7 +134,7 @@ func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {
 
 // AutoApproveNodeCertificateRotation creates RBAC rules in a way that makes Node certificate rotation CSR auto-approved by the csrapprover controller
 func AutoApproveNodeCertificateRotation(client clientset.Interface) error {
-	fmt.Println("[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster")
+	fmt.Println("[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster")
 
 	return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
 		ObjectMeta: metav1.ObjectMeta{

@@ -103,7 +103,7 @@ func CreateStaticPodFiles(manifestDir, patchesDir string, cfg *kubeadmapi.Cluste
 	var err error
 	if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) {
 		if isDryRun {
-			fmt.Printf("[dryrun] Would create users and groups for %+v to run as non-root\n", componentNames)
+			fmt.Printf("[control-plane] Would create users and groups for %+v to run as non-root\n", componentNames)
 		} else {
 			usersAndGroups, err = staticpodutil.GetUsersAndGroups()
 			if err != nil {

@@ -127,7 +127,7 @@ func CreateStaticPodFiles(manifestDir, patchesDir string, cfg *kubeadmapi.Cluste
 
 		if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) {
 			if isDryRun {
-				fmt.Printf("[dryrun] Would update static pod manifest for %q to run run as non-root\n", componentName)
+				fmt.Printf("[control-plane] Would update static pod manifest for %q to run run as non-root\n", componentName)
 			} else {
 				if usersAndGroups != nil {
 					if err := staticpodutil.RunComponentAsNonRoot(componentName, &spec, usersAndGroups, cfg); err != nil {

@@ -146,7 +146,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest
 
 	var cluster []etcdutil.Member
 	if isDryRun {
-		fmt.Printf("[dryrun] Would add etcd member: %s\n", etcdPeerAddress)
+		fmt.Printf("[etcd] Would add etcd member: %s\n", etcdPeerAddress)
 	} else {
 		klog.V(1).Infof("[etcd] Adding etcd member: %s", etcdPeerAddress)
 		cluster, err = etcdClient.AddMember(nodeName, etcdPeerAddress)

@@ -164,7 +164,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest
 	}
 
 	if isDryRun {
-		fmt.Println("[dryrun] Would wait for the new etcd member to join the cluster")
+		fmt.Println("[etcd] Would wait for the new etcd member to join the cluster")
 		return nil
 	}
 
@@ -264,8 +264,8 @@ func prepareAndWriteEtcdStaticPod(manifestDir string, patchesDir string, cfg *ku
 	var err error
 	if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) {
 		if isDryRun {
-			fmt.Printf("[dryrun] Would create users and groups for %q to run as non-root\n", kubeadmconstants.Etcd)
-			fmt.Printf("[dryrun] Would update static pod manifest for %q to run run as non-root\n", kubeadmconstants.Etcd)
+			fmt.Printf("[etcd] Would create users and groups for %q to run as non-root\n", kubeadmconstants.Etcd)
+			fmt.Printf("[etcd] Would update static pod manifest for %q to run run as non-root\n", kubeadmconstants.Etcd)
 		} else {
 			usersAndGroups, err = staticpodutil.GetUsersAndGroups()
 			if err != nil {

@@ -30,12 +30,12 @@ func TryStartKubelet() {
 	// If we notice that the kubelet service is inactive, try to start it
 	initSystem, err := initsystem.GetInitSystem()
 	if err != nil {
-		fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet is running properly.")
+		fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet is running properly.")
 		return
 	}
 
 	if !initSystem.ServiceExists(kubeadmconstants.Kubelet) {
-		fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet is running properly.")
+		fmt.Println("[kubelet-start] Couldn't detect a kubelet service, can't make sure the kubelet is running properly.")
 	}
 
 	// This runs "systemctl daemon-reload && systemctl restart kubelet"

@@ -50,12 +50,12 @@ func TryStopKubelet() {
 	// If we notice that the kubelet service is inactive, try to start it
 	initSystem, err := initsystem.GetInitSystem()
 	if err != nil {
-		fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.")
+		fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.")
 		return
 	}
 
 	if !initSystem.ServiceExists(kubeadmconstants.Kubelet) {
-		fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.")
+		fmt.Println("[kubelet-start] Couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.")
 	}
 
 	// This runs "systemctl daemon-reload && systemctl stop kubelet"

@@ -69,12 +69,12 @@ func TryRestartKubelet() {
 	// If we notice that the kubelet service is inactive, try to start it
 	initSystem, err := initsystem.GetInitSystem()
 	if err != nil {
-		fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.")
+		fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.")
 		return
 	}
 
 	if !initSystem.ServiceExists(kubeadmconstants.Kubelet) {
-		fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.")
+		fmt.Println("[kubelet-start] Couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.")
 	}
 
 	// This runs "systemctl daemon-reload && systemctl stop kubelet"

@@ -104,7 +104,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 	// If client.Discovery().RESTClient() is nil, the fake client is used.
 	// Return early because the kubeadm dryrun dynamic client only handles the core/v1 GroupVersion.
 	if client.Discovery().RESTClient() == nil {
-		fmt.Printf("[dryrun] Would create the Job %q in namespace %q and wait until it completes\n", jobName, ns)
+		fmt.Printf("[upgrade/health] Would create the Job %q in namespace %q and wait until it completes\n", jobName, ns)
 		return nil
 	}
 
@@ -251,7 +251,7 @@ func LabelOldControlPlaneNodes(client clientset.Interface) error {
 func UpdateKubeletDynamicEnvFileWithURLScheme(dryRun bool) error {
 	filePath := filepath.Join(kubeadmconstants.KubeletRunDirectory, kubeadmconstants.KubeletEnvFileName)
 	if dryRun {
-		fmt.Printf("[dryrun] Would ensure that %q includes a CRI endpoint URL scheme\n", filePath)
+		fmt.Printf("[upgrade] Would ensure that %q includes a CRI endpoint URL scheme\n", filePath)
 		return nil
 	}
 	klog.V(2).Infof("Ensuring that %q includes a CRI endpoint URL scheme", filePath)