diff --git a/cmd/kubeadm/app/cmd/phases/init/etcd.go b/cmd/kubeadm/app/cmd/phases/init/etcd.go index ad8d821ac28..8c09b3049cc 100644 --- a/cmd/kubeadm/app/cmd/phases/init/etcd.go +++ b/cmd/kubeadm/app/cmd/phases/init/etcd.go @@ -93,7 +93,7 @@ func runEtcdPhaseLocal() func(c workflow.RunData) error { return err } } else { - fmt.Printf("[dryrun] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir) + fmt.Printf("[etcd] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir) } fmt.Printf("[etcd] Creating static Pod manifest for local etcd in %q\n", data.ManifestDir()) if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(data.ManifestDir(), data.PatchesDir(), cfg.NodeRegistration.Name, &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, data.DryRun()); err != nil { diff --git a/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go b/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go index f6382026a2b..55e1f2e296b 100644 --- a/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go +++ b/cmd/kubeadm/app/cmd/phases/join/controlplanejoin.go @@ -126,7 +126,7 @@ func runEtcdPhase(c workflow.RunData) error { } // in case of local etcd if cfg.Etcd.External != nil { - fmt.Println("[control-plane-join] using external etcd - no local stacked instance added") + fmt.Println("[control-plane-join] Using external etcd - no local stacked instance added") return nil } @@ -136,7 +136,7 @@ func runEtcdPhase(c workflow.RunData) error { return err } } else { - fmt.Printf("[dryrun] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir) + fmt.Printf("[control-plane-join] Would ensure that %q directory is present\n", cfg.Etcd.Local.DataDir) } // Adds a new etcd instance; in order to do this the new etcd instance should be "announced" to @@ -194,7 +194,7 @@ func runMarkControlPlanePhase(c workflow.RunData) error { return errors.Wrap(err, "error applying control-plane label and taints") } } else { - fmt.Printf("[dryrun] Would mark node %s as a 
control-plane\n", cfg.NodeRegistration.Name) + fmt.Printf("[control-plane-join] Would mark node %s as a control-plane\n", cfg.NodeRegistration.Name) } return nil diff --git a/cmd/kubeadm/app/cmd/phases/join/kubelet.go b/cmd/kubeadm/app/cmd/phases/join/kubelet.go index 25a2c2f1005..377db62dab0 100644 --- a/cmd/kubeadm/app/cmd/phases/join/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/join/kubelet.go @@ -170,7 +170,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { klog.V(1).Infoln("[kubelet-start] Stopping the kubelet") kubeletphase.TryStopKubelet() } else { - fmt.Println("[dryrun] Would stop the kubelet") + fmt.Println("[kubelet-start] Would stop the kubelet") } // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start @@ -187,7 +187,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { } if data.DryRun() { - fmt.Println("[dryrun] Would start the kubelet") + fmt.Println("[kubelet-start] Would start the kubelet") // If we're dry-running, print the kubelet config manifests and print static pod manifests if joining a control plane. // TODO: think of a better place to move this call - e.g. a hidden phase. 
if err := dryrunutil.PrintFilesIfDryRunning(cfg.ControlPlane != nil, data.ManifestDir(), data.OutputWriter()); err != nil { diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go b/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go index 430fa2a03d2..4c218ee4204 100644 --- a/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/kubeletconfig.go @@ -118,7 +118,7 @@ func runKubeletConfigPhase() func(c workflow.RunData) error { return errors.Wrapf(err, "error updating the CRI socket for Node %q", nro.Name) } } else { - fmt.Println("[dryrun] would update the node CRI socket path to include an URL scheme") + fmt.Println("[upgrade] Would update the node CRI socket path to include an URL scheme") } } diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 1705dfd175f..782657a7363 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -170,7 +170,7 @@ func runApply(flags *applyFlags, args []string) error { } if flags.dryRun { - fmt.Println("[dryrun] Finished dryrunning successfully!") + fmt.Println("[upgrade/successful] Finished dryrunning successfully!") return nil } diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go index e2a38e6e44b..361840fa17d 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go @@ -50,7 +50,7 @@ const ( // AllowBootstrapTokensToPostCSRs creates RBAC rules in a way the makes Node Bootstrap Tokens able to post CSRs func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error { - fmt.Println("[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials") + fmt.Println("[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs 
in order for nodes to get long term certificate credentials") return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -72,7 +72,7 @@ func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error { // AllowBoostrapTokensToGetNodes creates RBAC rules to allow Node Bootstrap Tokens to list nodes func AllowBoostrapTokensToGetNodes(client clientset.Interface) error { - fmt.Println("[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes") + fmt.Println("[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes") if err := apiclient.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ @@ -111,7 +111,7 @@ func AllowBoostrapTokensToGetNodes(client clientset.Interface) error { // AutoApproveNodeBootstrapTokens creates RBAC rules in a way that makes Node Bootstrap Tokens' CSR auto-approved by the csrapprover controller func AutoApproveNodeBootstrapTokens(client clientset.Interface) error { - fmt.Println("[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token") + fmt.Println("[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token") // Always create this kubeadm-specific binding though return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{ @@ -134,7 +134,7 @@ func AutoApproveNodeBootstrapTokens(client clientset.Interface) error { // AutoApproveNodeCertificateRotation creates RBAC rules in a way that makes Node certificate rotation CSR auto-approved by the csrapprover controller func AutoApproveNodeCertificateRotation(client clientset.Interface) error { - fmt.Println("[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster") + fmt.Println("[bootstrap-token] Configured RBAC rules to allow 
certificate rotation for all node client certificates in the cluster") return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index eb02a8af974..8cd0bf577cd 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -103,7 +103,7 @@ func CreateStaticPodFiles(manifestDir, patchesDir string, cfg *kubeadmapi.Cluste var err error if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) { if isDryRun { - fmt.Printf("[dryrun] Would create users and groups for %+v to run as non-root\n", componentNames) + fmt.Printf("[control-plane] Would create users and groups for %+v to run as non-root\n", componentNames) } else { usersAndGroups, err = staticpodutil.GetUsersAndGroups() if err != nil { @@ -127,7 +127,7 @@ func CreateStaticPodFiles(manifestDir, patchesDir string, cfg *kubeadmapi.Cluste if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) { if isDryRun { - fmt.Printf("[dryrun] Would update static pod manifest for %q to run run as non-root\n", componentName) + fmt.Printf("[control-plane] Would update static pod manifest for %q to run as non-root\n", componentName) } else { if usersAndGroups != nil { if err := staticpodutil.RunComponentAsNonRoot(componentName, &spec, usersAndGroups, cfg); err != nil { diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index 87a412a3162..14ca6bd8298 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -146,7 +146,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest var cluster []etcdutil.Member if isDryRun { - fmt.Printf("[dryrun] Would add etcd member: %s\n", etcdPeerAddress) + fmt.Printf("[etcd] Would add etcd member: %s\n", etcdPeerAddress) } else { 
klog.V(1).Infof("[etcd] Adding etcd member: %s", etcdPeerAddress) cluster, err = etcdClient.AddMember(nodeName, etcdPeerAddress) @@ -164,7 +164,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest } if isDryRun { - fmt.Println("[dryrun] Would wait for the new etcd member to join the cluster") + fmt.Println("[etcd] Would wait for the new etcd member to join the cluster") return nil } @@ -264,8 +264,8 @@ func prepareAndWriteEtcdStaticPod(manifestDir string, patchesDir string, cfg *ku var err error if features.Enabled(cfg.FeatureGates, features.RootlessControlPlane) { if isDryRun { - fmt.Printf("[dryrun] Would create users and groups for %q to run as non-root\n", kubeadmconstants.Etcd) - fmt.Printf("[dryrun] Would update static pod manifest for %q to run run as non-root\n", kubeadmconstants.Etcd) + fmt.Printf("[etcd] Would create users and groups for %q to run as non-root\n", kubeadmconstants.Etcd) + fmt.Printf("[etcd] Would update static pod manifest for %q to run as non-root\n", kubeadmconstants.Etcd) } else { usersAndGroups, err = staticpodutil.GetUsersAndGroups() if err != nil { diff --git a/cmd/kubeadm/app/phases/kubelet/kubelet.go b/cmd/kubeadm/app/phases/kubelet/kubelet.go index e32e4e68f8f..18c4f612b91 100644 --- a/cmd/kubeadm/app/phases/kubelet/kubelet.go +++ b/cmd/kubeadm/app/phases/kubelet/kubelet.go @@ -30,12 +30,12 @@ func TryStartKubelet() { // If we notice that the kubelet service is inactive, try to start it initSystem, err := initsystem.GetInitSystem() if err != nil { - fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet is running properly.") + fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet is running properly.") return } if !initSystem.ServiceExists(kubeadmconstants.Kubelet) { - fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet is running properly.") + fmt.Println("[kubelet-start] 
Couldn't detect a kubelet service, can't make sure the kubelet is running properly.") } // This runs "systemctl daemon-reload && systemctl restart kubelet" @@ -50,12 +50,12 @@ func TryStopKubelet() { // If we notice that the kubelet service is inactive, try to start it initSystem, err := initsystem.GetInitSystem() if err != nil { - fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") + fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") return } if !initSystem.ServiceExists(kubeadmconstants.Kubelet) { - fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.") + fmt.Println("[kubelet-start] Couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.") } // This runs "systemctl daemon-reload && systemctl stop kubelet" @@ -69,12 +69,12 @@ func TryRestartKubelet() { // If we notice that the kubelet service is inactive, try to start it initSystem, err := initsystem.GetInitSystem() if err != nil { - fmt.Println("[kubelet-start] no supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") + fmt.Println("[kubelet-start] No supported init system detected, won't make sure the kubelet not running for a short period of time while setting up configuration for it.") return } if !initSystem.ServiceExists(kubeadmconstants.Kubelet) { - fmt.Println("[kubelet-start] couldn't detect a kubelet service, can't make sure the kubelet not running for a short period of time while setting up configuration for it.") + fmt.Println("[kubelet-start] Couldn't detect a kubelet service, can't 
make sure the kubelet not running for a short period of time while setting up configuration for it.") } // This runs "systemctl daemon-reload && systemctl stop kubelet" diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index 7ac2fa317cc..980e80265a9 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -104,7 +104,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) // If client.Discovery().RESTClient() is nil, the fake client is used. // Return early because the kubeadm dryrun dynamic client only handles the core/v1 GroupVersion. if client.Discovery().RESTClient() == nil { - fmt.Printf("[dryrun] Would create the Job %q in namespace %q and wait until it completes\n", jobName, ns) + fmt.Printf("[upgrade/health] Would create the Job %q in namespace %q and wait until it completes\n", jobName, ns) return nil } diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index c3e169489de..e1b22183e8c 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -251,7 +251,7 @@ func LabelOldControlPlaneNodes(client clientset.Interface) error { func UpdateKubeletDynamicEnvFileWithURLScheme(dryRun bool) error { filePath := filepath.Join(kubeadmconstants.KubeletRunDirectory, kubeadmconstants.KubeletEnvFileName) if dryRun { - fmt.Printf("[dryrun] Would ensure that %q includes a CRI endpoint URL scheme\n", filePath) + fmt.Printf("[upgrade] Would ensure that %q includes a CRI endpoint URL scheme\n", filePath) return nil } klog.V(2).Infof("Ensuring that %q includes a CRI endpoint URL scheme", filePath)