kubeadm: use Infoln instead of Infof where appropriate

Author: Dmitry Rozhkov
Date:   2019-02-28 16:12:34 +02:00
Parent: 343bb4bd6b
Commit: bb8f4ac84f

10 changed files with 25 additions and 25 deletions
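
Every call site touched below passes a fixed message and no format arguments, so the format-string variant buys nothing and can even mangle output if the message ever contains a literal '%'. The short sketch below is illustrative only and not taken from this commit; the message text, the "demo" prefix, and the flag setup are assumptions. It shows the difference between the two klog calls and how to enable the V(1) output these lines are guarded by (e.g. kubeadm's --v=1 flag).

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog.V(1) messages are only emitted when verbosity is >= 1;
	// here we raise it programmatically instead of via a CLI flag.
	klog.InitFlags(nil)
	flag.Set("v", "1")
	flag.Parse()

	// Infof treats its argument as a format string. With no further
	// arguments, a trailing literal '%' is mangled and this prints
	// "[demo] upload is 100%!(NOVERB)".
	klog.V(1).Infof("[demo] upload is 100%")

	// Infoln logs the message verbatim, which is all these call
	// sites need: "[demo] upload is 100%".
	klog.V(1).Infoln("[demo] upload is 100%")

	klog.Flush()
}

Running the sketch prints the mangled line first and the clean line second; the commit applies the same substitution to every klog call that has no formatting directives.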

@@ -97,7 +97,7 @@ func runEtcdPhaseLocal() func(c workflow.RunData) error {
 return errors.Wrap(err, "error creating local etcd static pod manifest file")
 }
 } else {
-klog.V(1).Infof("[etcd] External etcd mode. Skipping the creation of a manifest for local etcd")
+klog.V(1).Infoln("[etcd] External etcd mode. Skipping the creation of a manifest for local etcd")
 }
 return nil
 }

@@ -58,7 +58,7 @@ func runKubeletStart(c workflow.RunData) error {
 // First off, configure the kubelet. In this short timeframe, kubeadm is trying to stop/restart the kubelet
 // Try to stop the kubelet service so no race conditions occur when configuring it
 if !data.DryRun() {
-klog.V(1).Infof("Stopping the kubelet")
+klog.V(1).Infoln("Stopping the kubelet")
 kubeletphase.TryStopKubelet()
 }
@@ -76,7 +76,7 @@ func runKubeletStart(c workflow.RunData) error {
 // Try to start the kubelet service in case it's inactive
 if !data.DryRun() {
-klog.V(1).Infof("Starting the kubelet")
+klog.V(1).Infoln("Starting the kubelet")
 kubeletphase.TryStartKubelet()
}

@@ -50,7 +50,7 @@ func runUploadCerts(c workflow.RunData) error {
 }
 if !data.UploadCerts() {
-klog.V(1).Infof("[upload-certs] Skipping certs upload")
+klog.V(1).Infoln("[upload-certs] Skipping certs upload")
 return nil
 }
 client, err := data.Client()

@@ -106,7 +106,7 @@ func runUploadKubeadmConfig(c workflow.RunData) error {
 return err
 }
-klog.V(1).Infof("[upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap")
+klog.V(1).Infoln("[upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap")
 if err := uploadconfig.UploadConfiguration(cfg, client); err != nil {
 return errors.Wrap(err, "error uploading the kubeadm ClusterConfiguration")
 }
@@ -120,12 +120,12 @@ func runUploadKubeletConfig(c workflow.RunData) error {
 return err
 }
-klog.V(1).Infof("[upload-config] Uploading the kubelet component config to a ConfigMap")
+klog.V(1).Infoln("[upload-config] Uploading the kubelet component config to a ConfigMap")
 if err = kubeletphase.CreateConfigMap(cfg.ClusterConfiguration.ComponentConfigs.Kubelet, cfg.KubernetesVersion, client); err != nil {
 return errors.Wrap(err, "error creating kubelet configuration ConfigMap")
 }
-klog.V(1).Infof("[upload-config] Preserving the CRISocket information for the control-plane node")
+klog.V(1).Infoln("[upload-config] Preserving the CRISocket information for the control-plane node")
 if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
 return errors.Wrap(err, "Error writing Crisocket information for the control-plane node")
 }

@@ -77,7 +77,7 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 }
 // waiter holds the apiclient.Waiter implementation of choice, responsible for querying the API server in various ways and waiting for conditions to be fulfilled
-klog.V(1).Infof("[wait-control-plane] Waiting for the API server to be healthy")
+klog.V(1).Infoln("[wait-control-plane] Waiting for the API server to be healthy")
 client, err := data.Client()
 if err != nil {

@@ -126,7 +126,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) error {
 // Configure the kubelet. In this short timeframe, kubeadm is trying to stop/restart the kubelet
 // Try to stop the kubelet service so no race conditions occur when configuring it
-klog.V(1).Infof("[kubelet-start] Stopping the kubelet")
+klog.V(1).Infoln("[kubelet-start] Stopping the kubelet")
 kubeletphase.TryStopKubelet()
 // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start
@@ -143,7 +143,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) error {
 }
 // Try to start the kubelet service in case it's inactive
-klog.V(1).Infof("[kubelet-start] Starting the kubelet")
+klog.V(1).Infoln("[kubelet-start] Starting the kubelet")
 kubeletphase.TryStartKubelet()
 // Now the kubelet will perform the TLS Bootstrap, transforming /etc/kubernetes/bootstrap-kubelet.conf to /etc/kubernetes/kubelet.conf
@@ -161,7 +161,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) error {
 return err
 }
-klog.V(1).Infof("[kubelet-start] preserving the crisocket information for the node")
+klog.V(1).Infoln("[kubelet-start] preserving the crisocket information for the node")
 if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
 return errors.Wrap(err, "error uploading crisocket")
 }

@@ -142,7 +142,7 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
 // Only clear etcd data when using local etcd.
 etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml")
-klog.V(1).Infof("[reset] checking for etcd config")
+klog.V(1).Infoln("[reset] checking for etcd config")
 etcdDataDir, err := getEtcdDataDir(etcdManifestPath, cfg)
 if err == nil {
 dirsToClean = append(dirsToClean, etcdDataDir)
@@ -157,7 +157,7 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
 }
 // Try to stop the kubelet service
-klog.V(1).Infof("[reset] getting init system")
+klog.V(1).Infoln("[reset] getting init system")
 initSystem, err := initsystem.GetInitSystem()
 if err != nil {
 klog.Warningln("[reset] the kubelet service could not be stopped by kubeadm. Unable to detect a supported init system!")

@@ -114,8 +114,8 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command {
 func runApply(flags *applyFlags, userVersion string) error {
 // Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
-klog.V(1).Infof("[upgrade/apply] verifying health of cluster")
-klog.V(1).Infof("[upgrade/apply] retrieving configuration from cluster")
+klog.V(1).Infoln("[upgrade/apply] verifying health of cluster")
+klog.V(1).Infoln("[upgrade/apply] retrieving configuration from cluster")
 client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, flags.dryRun, userVersion)
 if err != nil {
 return err
@@ -127,7 +127,7 @@ func runApply(flags *applyFlags, userVersion string) error {
 }
 // Validate requested and validate actual version
-klog.V(1).Infof("[upgrade/apply] validating requested and actual version")
+klog.V(1).Infoln("[upgrade/apply] validating requested and actual version")
 if err := configutil.NormalizeKubernetesVersion(&cfg.ClusterConfiguration); err != nil {
 return err
 }
@@ -143,7 +143,7 @@ func runApply(flags *applyFlags, userVersion string) error {
 }
 // Enforce the version skew policies
-klog.V(1).Infof("[upgrade/version] enforcing version skew policies")
+klog.V(1).Infoln("[upgrade/version] enforcing version skew policies")
 if err := EnforceVersionPolicies(cfg.KubernetesVersion, newK8sVersion, flags, versionGetter); err != nil {
 return errors.Wrap(err, "[upgrade/version] FATAL")
 }
@@ -159,7 +159,7 @@ func runApply(flags *applyFlags, userVersion string) error {
 // Use a prepuller implementation based on creating DaemonSets
 // and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally
-klog.V(1).Infof("[upgrade/apply] creating prepuller")
+klog.V(1).Infoln("[upgrade/apply] creating prepuller")
 prepuller := upgrade.NewDaemonSetPrepuller(client, waiter, &cfg.ClusterConfiguration)
 componentsToPrepull := constants.ControlPlaneComponents
 if cfg.Etcd.External == nil && flags.etcdUpgrade {
@@ -170,13 +170,13 @@ func runApply(flags *applyFlags, userVersion string) error {
 }
 // Now; perform the upgrade procedure
-klog.V(1).Infof("[upgrade/apply] performing upgrade")
+klog.V(1).Infoln("[upgrade/apply] performing upgrade")
 if err := PerformControlPlaneUpgrade(flags, client, waiter, cfg); err != nil {
 return errors.Wrap(err, "[upgrade/apply] FATAL")
 }
 // Upgrade RBAC rules and addons.
-klog.V(1).Infof("[upgrade/postupgrade] upgrading RBAC rules and addons")
+klog.V(1).Infoln("[upgrade/postupgrade] upgrading RBAC rules and addons")
 if err := upgrade.PerformPostUpgradeTasks(client, cfg, newK8sVersion, flags.dryRun); err != nil {
 return errors.Wrap(err, "[upgrade/postupgrade] FATAL post-upgrade error")
 }

@@ -64,8 +64,8 @@ func NewCmdPlan(apf *applyPlanFlags) *cobra.Command {
 // runPlan takes care of outputting available versions to upgrade to for the user
 func runPlan(flags *planFlags, userVersion string) error {
 // Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
-klog.V(1).Infof("[upgrade/plan] verifying health of cluster")
-klog.V(1).Infof("[upgrade/plan] retrieving configuration from cluster")
+klog.V(1).Infoln("[upgrade/plan] verifying health of cluster")
+klog.V(1).Infoln("[upgrade/plan] retrieving configuration from cluster")
 client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, false, userVersion)
 if err != nil {
 return err
@@ -91,7 +91,7 @@ func runPlan(flags *planFlags, userVersion string) error {
 }
 // Compute which upgrade possibilities there are
-klog.V(1).Infof("[upgrade/plan] computing upgrade possibilities")
+klog.V(1).Infoln("[upgrade/plan] computing upgrade possibilities")
 availUpgrades, err := upgrade.GetAvailableUpgrades(versionGetter, flags.allowExperimentalUpgrades, flags.allowRCUpgrades, etcdClient, cfg.DNS.Type, client)
 if err != nil {
 return errors.Wrap(err, "[upgrade/versions] FATAL")

@@ -409,7 +409,7 @@ func (HostnameCheck) Name() string {
 // Check validates if hostname match dns sub domain regex.
 func (hc HostnameCheck) Check() (warnings, errorList []error) {
-klog.V(1).Infof("checking whether the given node name is reachable using net.LookupHost")
+klog.V(1).Infoln("checking whether the given node name is reachable using net.LookupHost")
 errorList = []error{}
 warnings = []error{}
 addr, err := net.LookupHost(hc.nodeName)
@@ -436,7 +436,7 @@ func (hst HTTPProxyCheck) Name() string {
 // Check validates http connectivity type, direct or via proxy.
 func (hst HTTPProxyCheck) Check() (warnings, errorList []error) {
-klog.V(1).Infof("validating if the connectivity type is via proxy or direct")
+klog.V(1).Infoln("validating if the connectivity type is via proxy or direct")
 u := (&url.URL{Scheme: hst.Proto, Host: hst.Host}).String()
 req, err := http.NewRequest("GET", u, nil)