Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)
Merge pull request #130040 from HirazawaUi/make-error-consistent
kubeadm: make kubeadm init and join output the same error
This commit is contained in: commit e30c8a3dde
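
In brief, the change removes the per-command failure templates from the kubeadm init wait-control-plane phase and the join kubelet-start phase, and replaces them with two shared helpers added to the apiclient wait utilities, PrintKubeletErrorHelpScreen and PrintControlPlaneErrorHelpScreen, so that both commands print the same troubleshooting text and then return a wrapped error. Below is a minimal sketch of the resulting call pattern; the import path and the stand-in waitForKubelet function are illustrative assumptions for this sketch, not code taken from the diff.

	// Sketch only: mirrors the unified error path introduced by this change.
	package main

	import (
		"fmt"
		"os"

		"github.com/pkg/errors"

		// Assumed import path for the helpers added in this diff.
		"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
	)

	// waitForKubelet is a placeholder for the real waiter call; it is not kubeadm code.
	func waitForKubelet() error {
		return errors.New("timed out waiting for the kubelet /healthz endpoint")
	}

	func run() error {
		if err := waitForKubelet(); err != nil {
			// Both init and join now print the same help screen...
			apiclient.PrintKubeletErrorHelpScreen(os.Stdout)
			// ...and return a wrapped error instead of a command-specific message.
			return errors.Wrap(err, "failed while waiting for the kubelet to start")
		}
		return nil
	}

	func main() {
		if err := run(); err != nil {
			fmt.Fprintln(os.Stderr, "error:", err)
		}
	}
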
@@ -19,10 +19,8 @@ package phases
 import (
 	"fmt"
 	"io"
-	"text/template"
 	"time"
 
-	"github.com/lithammer/dedent"
 	"github.com/pkg/errors"
 
 	v1 "k8s.io/api/core/v1"
@@ -38,28 +36,6 @@ import (
 	staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
 )
 
-var (
-	kubeletFailTempl = template.Must(template.New("init").Parse(dedent.Dedent(`
-	Unfortunately, an error has occurred:
-		{{ .Error }}
-
-	This error is likely caused by:
-		- The kubelet is not running
-		- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
-
-	If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
-		- 'systemctl status kubelet'
-		- 'journalctl -xeu kubelet'
-
-	Additionally, a control plane component may have crashed or exited when started by the container runtime.
-	To troubleshoot, list all containers using your preferred container runtimes CLI.
-	Here is one example how you may list all running Kubernetes containers by using crictl:
-		- 'crictl --runtime-endpoint {{ .Socket }} ps -a | grep kube | grep -v pause'
-		Once you have found the failing container, you can inspect its logs with:
-		- 'crictl --runtime-endpoint {{ .Socket }} logs CONTAINERID'
-	`)))
-)
-
 // NewWaitControlPlanePhase is a hidden phase that runs after the control-plane and etcd phases
 func NewWaitControlPlanePhase() workflow.Phase {
 	phase := workflow.Phase{
@@ -102,19 +78,6 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 		" from directory %q\n",
 		data.ManifestDir())
 
-	handleError := func(err error) error {
-		context := struct {
-			Error  string
-			Socket string
-		}{
-			Error:  fmt.Sprintf("%v", err),
-			Socket: data.Cfg().NodeRegistration.CRISocket,
-		}
-
-		kubeletFailTempl.Execute(data.OutputWriter(), context)
-		return errors.New("could not initialize a Kubernetes cluster")
-	}
-
 	waiter.SetTimeout(data.Cfg().Timeouts.KubeletHealthCheck.Duration)
 	kubeletConfig := data.Cfg().ClusterConfiguration.ComponentConfigs[componentconfigs.KubeletGroup].Get()
 	kubeletConfigTyped, ok := kubeletConfig.(*kubeletconfig.KubeletConfiguration)
@@ -122,7 +85,8 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 		return errors.New("could not convert the KubeletConfiguration to a typed object")
 	}
 	if err := waiter.WaitForKubelet(kubeletConfigTyped.HealthzBindAddress, *kubeletConfigTyped.HealthzPort); err != nil {
-		return handleError(err)
+		apiclient.PrintKubeletErrorHelpScreen(data.OutputWriter())
+		return errors.Wrap(err, "failed while waiting for the kubelet to start")
 	}
 
 	var podMap map[string]*v1.Pod
@@ -138,7 +102,8 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 		err = waiter.WaitForAPI()
 	}
 	if err != nil {
-		return handleError(err)
+		apiclient.PrintControlPlaneErrorHelpScreen(data.OutputWriter(), data.Cfg().NodeRegistration.CRISocket)
+		return errors.Wrap(err, "failed while waiting for the control plane to start")
 	}
 
 	return nil
@@ -23,7 +23,6 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/lithammer/dedent"
 	"github.com/pkg/errors"
 
 	v1 "k8s.io/api/core/v1"
@@ -50,21 +49,6 @@ import (
 	kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
 )
 
-var (
-	kubeadmJoinFailMsg = dedent.Dedent(`
-	Unfortunately, an error has occurred:
-		%v
-
-	This error is likely caused by:
-		- The kubelet is not running
-		- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
-
-	If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
-		- 'systemctl status kubelet'
-		- 'journalctl -xeu kubelet'
-	`)
-)
-
 // NewKubeletStartPhase creates a kubeadm workflow phase that start kubelet on a node.
 func NewKubeletStartPhase() workflow.Phase {
 	return workflow.Phase{
@@ -328,13 +312,13 @@ func runKubeletWaitBootstrapPhase(c workflow.RunData) (returnErr error) {
 		return errors.New("could not convert the KubeletConfiguration to a typed object")
 	}
 	if err := waiter.WaitForKubelet(kubeletConfigTyped.HealthzBindAddress, *kubeletConfigTyped.HealthzPort); err != nil {
-		fmt.Printf(kubeadmJoinFailMsg, err)
-		return err
+		apiclient.PrintKubeletErrorHelpScreen(data.OutputWriter())
+		return errors.Wrap(err, "failed while waiting for the kubelet to start")
 	}
 
 	if err := waitForTLSBootstrappedClient(cfg.Timeouts.TLSBootstrap.Duration); err != nil {
-		fmt.Printf(kubeadmJoinFailMsg, err)
-		return err
+		apiclient.PrintKubeletErrorHelpScreen(data.OutputWriter())
+		return errors.Wrap(err, "failed while waiting for TLS bootstrap")
 	}
 
 	// When we know the /etc/kubernetes/kubelet.conf file is available, get the client
@@ -80,7 +80,8 @@ func runWaitControlPlanePhase(c workflow.RunData) error {
 	}
 	if err = waiter.WaitForControlPlaneComponents(pods,
 		data.Cfg().ControlPlane.LocalAPIEndpoint.AdvertiseAddress); err != nil {
-		return err
+		apiclient.PrintControlPlaneErrorHelpScreen(data.OutputWriter(), data.Cfg().NodeRegistration.CRISocket)
+		return errors.Wrap(err, "failed while waiting for the control plane to start")
 	}
 
 	return nil
@@ -18,7 +18,6 @@ package kubelet
 
 import (
 	"fmt"
 
 	"k8s.io/klog/v2"
 
 	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -24,8 +24,10 @@ import (
 	"net"
 	"net/http"
 	"strings"
+	"text/template"
 	"time"
 
+	"github.com/lithammer/dedent"
 	"github.com/pkg/errors"
 
 	v1 "k8s.io/api/core/v1"
@@ -52,6 +54,27 @@ const (
 	argAdvertiseAddress = "advertise-address"
 )
 
+var (
+	controlPlaneFailTempl = template.Must(template.New("init").Parse(dedent.Dedent(`
+	A control plane component may have crashed or exited when started by the container runtime.
+	To troubleshoot, list all containers using your preferred container runtimes CLI.
+	Here is one example how you may list all running Kubernetes containers by using crictl:
+		- 'crictl --runtime-endpoint {{ .Socket }} ps -a | grep kube | grep -v pause'
+		Once you have found the failing container, you can inspect its logs with:
+		- 'crictl --runtime-endpoint {{ .Socket }} logs CONTAINERID'
+	`)))
+
+	kubeletFailMsg = dedent.Dedent(`
+	Unfortunately, an error has occurred, likely caused by:
+		- The kubelet is not running
+		- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
+
+	If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
+		- 'systemctl status kubelet'
+		- 'journalctl -xeu kubelet'
+	`)
+)
+
 // Waiter is an interface for waiting for criteria in Kubernetes to happen
 type Waiter interface {
 	// WaitForControlPlaneComponents waits for all control plane components to be ready.
@@ -496,3 +519,19 @@ func getStaticPodSingleHash(client clientset.Interface, nodeName string, compone
 	staticPodHash := staticPod.Annotations["kubernetes.io/config.hash"]
 	return staticPodHash, nil
 }
+
+// PrintControlPlaneErrorHelpScreen prints help text on wait ControlPlane components errors.
+func PrintControlPlaneErrorHelpScreen(outputWriter io.Writer, criSocket string) {
+	context := struct {
+		Socket string
+	}{
+		Socket: criSocket,
+	}
+	_ = controlPlaneFailTempl.Execute(outputWriter, context)
+	fmt.Println("")
+}
+
+// PrintKubeletErrorHelpScreen prints help text on kubelet errors.
+func PrintKubeletErrorHelpScreen(outputWriter io.Writer) {
+	fmt.Fprintln(outputWriter, kubeletFailMsg)
+}