Merge pull request #128457 from neolit123/1.31-improve-dry-run-logic

kubeadm: support dryrunning upgrade without a real cluster
Committed by Kubernetes Prow Robot on 2024-10-31 15:21:33 +00:00 (via GitHub)
commit 50998de605
11 changed files with 373 additions and 71 deletions

View File

@@ -80,7 +80,7 @@ func runPreflight(c workflow.RunData) error {
// Run healthchecks against the cluster.
klog.V(1).Infoln("[upgrade/preflight] Verifying the cluster health")
- if err := upgrade.CheckClusterHealth(client, &initCfg.ClusterConfiguration, ignorePreflightErrors, printer); err != nil {
+ if err := upgrade.CheckClusterHealth(client, &initCfg.ClusterConfiguration, ignorePreflightErrors, data.DryRun(), printer); err != nil {
return err
}

View File

@@ -223,13 +223,13 @@ func newApplyData(cmd *cobra.Command, args []string, applyFlags *applyFlags) (*a
return nil, cmdutil.TypeMismatchErr("printConfig", "bool")
}
- client, err := getClient(applyFlags.kubeConfigPath, *dryRun)
+ printer := &output.TextPrinter{}
+ client, err := getClient(applyFlags.kubeConfigPath, *dryRun, printer)
if err != nil {
return nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", applyFlags.kubeConfigPath)
}
- printer := &output.TextPrinter{}
// Fetches the cluster configuration.
klog.V(1).Infoln("[upgrade] retrieving configuration from cluster")
initCfg, err := configutil.FetchInitConfigurationFromCluster(client, nil, "upgrade", false, false)

View File

@@ -21,6 +21,7 @@ import (
"bytes"
"io"
"os"
+ "path/filepath"
"github.com/pkg/errors"
"github.com/spf13/pflag"
@@ -71,7 +72,7 @@ func enforceRequirements(flagSet *pflag.FlagSet, flags *applyPlanFlags, args []s
}
}
- client, err := getClient(flags.kubeConfigPath, *isDryRun)
+ client, err := getClient(flags.kubeConfigPath, *isDryRun, printer)
if err != nil {
return nil, nil, nil, nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)
}
@@ -137,7 +138,7 @@ func enforceRequirements(flagSet *pflag.FlagSet, flags *applyPlanFlags, args []s
}
// Run healthchecks against the cluster
- if err := upgrade.CheckClusterHealth(client, &initCfg.ClusterConfiguration, ignorePreflightErrorsSet, printer); err != nil {
+ if err := upgrade.CheckClusterHealth(client, &initCfg.ClusterConfiguration, ignorePreflightErrorsSet, dryRun, printer); err != nil {
return nil, nil, nil, nil, errors.Wrap(err, "[upgrade/health] FATAL")
}
@@ -189,32 +190,57 @@ func runPreflightChecks(client clientset.Interface, ignorePreflightErrors sets.S
}
// getClient gets a real or fake client depending on whether the user is dry-running or not
- func getClient(file string, dryRun bool) (clientset.Interface, error) {
+ func getClient(file string, dryRun bool, printer output.Printer) (clientset.Interface, error) {
if dryRun {
+ // Default the server version to the kubeadm version.
+ serverVersion := constants.CurrentKubernetesVersion.Info()
dryRun := apiclient.NewDryRun()
- if err := dryRun.WithKubeConfigFile(file); err != nil {
- return nil, err
- }
dryRun.WithDefaultMarshalFunction().
WithWriter(os.Stdout).
PrependReactor(dryRun.HealthCheckJobReactor()).
PrependReactor(dryRun.PatchNodeReactor())
- // In order for fakeclient.Discovery().ServerVersion() to return the backing API Server's
- // real version; we have to do some clever API machinery tricks. First, we get the real
- // API Server's version.
- realServerVersion, err := dryRun.Client().Discovery().ServerVersion()
- if err != nil {
- return nil, errors.Wrap(err, "failed to get server version")
- }
+ // If the kubeconfig exists, construct a real client from it and get the real serverVersion.
+ if _, err := os.Stat(file); err == nil {
+ _, _ = printer.Printf("[dryrun] Creating a real client from %q\n", file)
+ if err := dryRun.WithKubeConfigFile(file); err != nil {
+ return nil, err
+ }
+ serverVersion, err = dryRun.Client().Discovery().ServerVersion()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get server version")
+ }
+ } else if os.IsNotExist(err) {
+ // If the file (supposedly admin.conf) does not exist, add more reactors.
+ // Knowing the node name is required by the ListPodsReactor. For that we try to use
+ // the kubelet.conf client, if it exists. If not, it falls back to hostname.
+ _, _ = printer.Printf("[dryrun] Dryrunning without a real client\n")
+ kubeconfigPath := filepath.Join(constants.KubernetesDir, constants.KubeletKubeConfigFileName)
+ nodeName, err := configutil.GetNodeName(kubeconfigPath)
+ if err != nil {
+ return nil, err
+ }
+ dryRun.PrependReactor(dryRun.GetKubeadmConfigReactor()).
+ PrependReactor(dryRun.GetKubeletConfigReactor()).
+ PrependReactor(dryRun.GetKubeProxyConfigReactor()).
+ PrependReactor(dryRun.GetNodeReactor()).
+ PrependReactor(dryRun.ListPodsReactor(nodeName)).
+ PrependReactor(dryRun.GetCoreDNSConfigReactor()).
+ PrependReactor(dryRun.ListDeploymentsReactor())
+ } else {
+ // Throw an error if the file exists but there was a different stat error.
+ return nil, errors.Wrapf(err, "could not create a client from %q", file)
+ }
// Obtain the FakeDiscovery object for this fake client.
fakeClient := dryRun.FakeClient()
fakeClientDiscovery, ok := fakeClient.Discovery().(*fakediscovery.FakeDiscovery)
if !ok {
return nil, errors.New("could not set fake discovery's server version")
}
- // Lastly, set the right server version to be used.
- fakeClientDiscovery.FakedServerVersion = realServerVersion
+ // Set the right server version for it.
+ fakeClientDiscovery.FakedServerVersion = serverVersion
return fakeClient, nil
}
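
The version-pinning trick above is plain client-go machinery and can be reproduced outside kubeadm. A minimal standalone sketch, assuming only client-go and apimachinery (the pinned version string is illustrative, not kubeadm's default):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/version"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// The fake clientset's Discovery() is backed by FakeDiscovery,
	// whose reported server version can be pinned directly.
	fakeDiscovery, ok := client.Discovery().(*fakediscovery.FakeDiscovery)
	if !ok {
		panic("unexpected discovery client type")
	}
	// getClient pins the real server's version when a kubeconfig exists,
	// or kubeadm's own default otherwise; here an arbitrary value is used.
	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: "v1.31.0"}

	info, err := client.Discovery().ServerVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println(info.GitVersion) // prints "v1.31.0"
}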

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrade
import (
+ "fmt"
"io"
"os"
@@ -26,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
@@ -37,6 +39,7 @@ import (
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
+ "k8s.io/kubernetes/cmd/kubeadm/app/util/output"
)
// nodeOptions defines all the options exposed via flags by kubeadm upgrade node.
@@ -84,7 +87,15 @@ func newCmdNode(out io.Writer) *cobra.Command {
return err
}
- return nodeRunner.Run(args)
+ if err := nodeRunner.Run(args); err != nil {
+ return err
+ }
+ if nodeOptions.dryRun {
+ fmt.Println("[upgrade/successful] Finished dryrunning successfully!")
+ return nil
+ }
+ return nil
},
Args: cobra.NoArgs,
}
@@ -150,6 +161,7 @@ func newNodeData(cmd *cobra.Command, nodeOptions *nodeOptions, out io.Writer) (*
isControlPlaneNode := true
filepath := constants.GetStaticPodFilepath(constants.KubeAPIServer, constants.GetStaticPodDirectory())
if _, err := os.Stat(filepath); os.IsNotExist(err) {
+ klog.V(1).Infof("assuming this is not a control plane node because %q is missing", filepath)
isControlPlaneNode = false
}
if len(nodeOptions.kubeConfigPath) == 0 {
@@ -171,7 +183,9 @@ func newNodeData(cmd *cobra.Command, nodeOptions *nodeOptions, out io.Writer) (*
if !ok {
return nil, cmdutil.TypeMismatchErr("dryRun", "bool")
}
- client, err := getClient(nodeOptions.kubeConfigPath, *dryRun)
+ printer := &output.TextPrinter{}
+ client, err := getClient(nodeOptions.kubeConfigPath, *dryRun, printer)
if err != nil {
return nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", nodeOptions.kubeConfigPath)
}

View File

@@ -46,16 +46,18 @@ const createJobHealthCheckPrefix = "upgrade-health-check"
// healthCheck is a helper struct for easily performing healthchecks against the cluster and printing the output
type healthCheck struct {
name string
client clientset.Interface
cfg *kubeadmapi.ClusterConfiguration
+ dryRun bool
+ printer output.Printer
// f is invoked with a k8s client and a kubeadm ClusterConfiguration passed to it. Should return an optional error
- f func(clientset.Interface, *kubeadmapi.ClusterConfiguration) error
+ f func(clientset.Interface, *kubeadmapi.ClusterConfiguration, bool, output.Printer) error
}
// Check is part of the preflight.Checker interface
func (c *healthCheck) Check() (warnings, errors []error) {
- if err := c.f(c.client, c.cfg); err != nil {
+ if err := c.f(c.client, c.cfg, c.dryRun, c.printer); err != nil {
return nil, []error{err}
}
return nil, nil
@@ -70,24 +72,30 @@ func (c *healthCheck) Name() string {
// - the cluster can accept a workload
// - all control-plane Nodes are Ready
// - (if static pod-hosted) that all required Static Pod manifests exist on disk
- func CheckClusterHealth(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration, ignoreChecksErrors sets.Set[string], printer output.Printer) error {
+ func CheckClusterHealth(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration, ignoreChecksErrors sets.Set[string], dryRun bool, printer output.Printer) error {
_, _ = printer.Println("[upgrade] Running cluster health checks")
healthChecks := []preflight.Checker{
&healthCheck{
name: "CreateJob",
client: client,
cfg: cfg,
f: createJob,
name: "CreateJob",
client: client,
cfg: cfg,
f: createJob,
dryRun: dryRun,
printer: printer,
},
&healthCheck{
name: "ControlPlaneNodesReady",
client: client,
f: controlPlaneNodesReady,
name: "ControlPlaneNodesReady",
client: client,
f: controlPlaneNodesReady,
dryRun: dryRun,
printer: printer,
},
&healthCheck{
name: "StaticPodManifest",
f: staticPodManifestHealth,
name: "StaticPodManifest",
f: staticPodManifestHealth,
dryRun: dryRun,
printer: printer,
},
}
@@ -95,7 +103,7 @@ func CheckClusterHealth(client clientset.Interface, cfg *kubeadmapi.ClusterConfi
}
// createJob is a check that verifies that a Job can be created in the cluster
- func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) error {
+ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration, _ bool, _ output.Printer) error {
const (
fieldSelector = "spec.unschedulable=false"
ns = metav1.NamespaceSystem
@@ -213,7 +221,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
}
// controlPlaneNodesReady checks whether all control-plane Nodes in the cluster are in the Running state
- func controlPlaneNodesReady(client clientset.Interface, _ *kubeadmapi.ClusterConfiguration) error {
+ func controlPlaneNodesReady(client clientset.Interface, _ *kubeadmapi.ClusterConfiguration, _ bool, _ output.Printer) error {
selectorControlPlane := labels.SelectorFromSet(map[string]string{
constants.LabelNodeRoleControlPlane: "",
})
@@ -232,10 +240,14 @@ func controlPlaneNodesReady(client clientset.Interface, _ *kubeadmapi.ClusterCon
}
// staticPodManifestHealth makes sure the required static pods are presents
- func staticPodManifestHealth(_ clientset.Interface, _ *kubeadmapi.ClusterConfiguration) error {
+ func staticPodManifestHealth(_ clientset.Interface, _ *kubeadmapi.ClusterConfiguration, dryRun bool, printer output.Printer) error {
var nonExistentManifests []string
for _, component := range constants.ControlPlaneComponents {
manifestFile := constants.GetStaticPodFilepath(component, constants.GetStaticPodDirectory())
+ if dryRun {
+ _, _ = printer.Printf("[dryrun] would check if %s exists\n", manifestFile)
+ continue
+ }
if _, err := os.Stat(manifestFile); os.IsNotExist(err) {
nonExistentManifests = append(nonExistentManifests, manifestFile)
}
@@ -243,7 +255,7 @@ func staticPodManifestHealth(_ clientset.Interface, _ *kubeadmapi.ClusterConfigu
if len(nonExistentManifests) == 0 {
return nil
}
return errors.Errorf("The control plane seems to be Static Pod-hosted, but some of the manifests don't seem to exist on disk. This probably means you're running 'kubeadm upgrade' on a remote machine, which is not supported for a Static Pod-hosted cluster. Manifest files not found: %v", nonExistentManifests)
return errors.Errorf("manifest files not found: %v", nonExistentManifests)
}
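
The gating pattern used by staticPodManifestHealth generalizes: a check takes a dryRun flag and, instead of touching the filesystem, prints the action it would have performed. A self-contained sketch of that pattern (the fileCheck type and the path are illustrative, not kubeadm's own):

package main

import (
	"fmt"
	"os"
)

// fileCheck verifies that a file exists; in dry-run mode it only
// reports what it would verify, mirroring staticPodManifestHealth.
type fileCheck struct {
	path   string
	dryRun bool
}

func (c *fileCheck) Check() (warnings, errs []error) {
	if c.dryRun {
		// Report the skipped action instead of performing it.
		fmt.Printf("[dryrun] would check if %s exists\n", c.path)
		return nil, nil
	}
	if _, err := os.Stat(c.path); os.IsNotExist(err) {
		return nil, []error{fmt.Errorf("manifest file not found: %s", c.path)}
	}
	return nil, nil
}

func main() {
	c := &fileCheck{path: "/etc/kubernetes/manifests/kube-apiserver.yaml", dryRun: true}
	if _, errs := c.Check(); len(errs) > 0 {
		fmt.Println(errs)
	}
}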
// getNotReadyNodes returns a string slice of nodes in the cluster that are NotReady

View File

@@ -113,11 +113,6 @@ func (w *fakeWaiter) WaitForPodsWithLabel(kvLabel string) error {
return w.errsToReturn[waitForPodsWithLabel]
}
- // WaitForPodToDisappear just returns a dummy nil, to indicate that the program should just proceed
- func (w *fakeWaiter) WaitForPodToDisappear(podName string) error {
- return nil
- }
// SetTimeout is a no-op; we don't use it in this implementation
func (w *fakeWaiter) SetTimeout(_ time.Duration) {}

View File

@@ -27,6 +27,7 @@ import (
"github.com/lithammer/dedent"
"github.com/pkg/errors"
+ appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -425,7 +426,6 @@ func (d *DryRun) GetKubeadmCertsReactor() *testing.SimpleReactor {
if a.GetName() != constants.KubeadmCertsSecret || a.GetNamespace() != metav1.NamespaceSystem {
return false, nil, nil
}
obj := getKubeadmCertsSecret()
d.LogObject(obj, action.GetResource().GroupVersion())
return true, obj, nil
@@ -469,6 +469,24 @@ func (d *DryRun) GetKubeProxyConfigReactor() *testing.SimpleReactor {
}
}
+ // GetCoreDNSConfigReactor returns a reactor that handles the GET action of the "coredns"
+ // ConfigMap.
+ func (d *DryRun) GetCoreDNSConfigReactor() *testing.SimpleReactor {
+ return &testing.SimpleReactor{
+ Verb: "get",
+ Resource: "configmaps",
+ Reaction: func(action testing.Action) (bool, runtime.Object, error) {
+ a := action.(testing.GetAction)
+ if a.GetName() != constants.CoreDNSConfigMap || a.GetNamespace() != metav1.NamespaceSystem {
+ return false, nil, nil
+ }
+ obj := getCoreDNSConfigMap()
+ d.LogObject(obj, action.GetResource().GroupVersion())
+ return true, obj, nil
+ },
+ }
+ }
// DeleteBootstrapTokenReactor returns a reactor that handles the DELETE action
// of bootstrap token Secret.
func (d *DryRun) DeleteBootstrapTokenReactor() *testing.SimpleReactor {
@@ -492,6 +510,40 @@ func (d *DryRun) DeleteBootstrapTokenReactor() *testing.SimpleReactor {
}
}
+ // ListPodsReactor returns a reactor that handles the LIST action on pods.
+ func (d *DryRun) ListPodsReactor(nodeName string) *testing.SimpleReactor {
+ return &testing.SimpleReactor{
+ Verb: "list",
+ Resource: "pods",
+ Reaction: func(action testing.Action) (bool, runtime.Object, error) {
+ a := action.(testing.ListAction)
+ if a.GetNamespace() != metav1.NamespaceSystem {
+ return false, nil, nil
+ }
+ obj := getPodList(nodeName)
+ d.LogObject(obj, action.GetResource().GroupVersion())
+ return true, obj, nil
+ },
+ }
+ }
+ // ListDeploymentsReactor returns a reactor that handles the LIST action on deployments.
+ func (d *DryRun) ListDeploymentsReactor() *testing.SimpleReactor {
+ return &testing.SimpleReactor{
+ Verb: "list",
+ Resource: "deployments",
+ Reaction: func(action testing.Action) (bool, runtime.Object, error) {
+ a := action.(testing.ListAction)
+ if a.GetNamespace() != metav1.NamespaceSystem {
+ return false, nil, nil
+ }
+ obj := getDeploymentList()
+ d.LogObject(obj, action.GetResource().GroupVersion())
+ return true, obj, nil
+ },
+ }
+ }
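
These reactors build on client-go's fake clientset: a reactor that returns handled == true short-circuits the call, while false falls through to the next reactor and finally the default object tracker. The same mechanism is available on a plain fake clientset, as this standalone sketch shows (the canned pod is illustrative):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()

	// Intercept LIST on pods in kube-system and return a canned list,
	// much like ListPodsReactor does inside the kubeadm DryRun helper.
	client.PrependReactor("list", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) {
		if action.GetNamespace() != metav1.NamespaceSystem {
			return false, nil, nil // not handled; fall through
		}
		list := &corev1.PodList{Items: []corev1.Pod{
			{ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver-node1", Namespace: metav1.NamespaceSystem}},
		}}
		return true, list, nil
	})

	pods, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pods.Items)) // prints "1"
}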
// getJob returns a fake Job object.
func getJob(namespace, name string) *batchv1.Job {
return &batchv1.Job{
@@ -515,7 +567,9 @@ func getNode(name string) *corev1.Node {
Labels: map[string]string{
"kubernetes.io/hostname": name,
},
- Annotations: map[string]string{},
+ Annotations: map[string]string{
+ "kubeadm.alpha.kubernetes.io/cri-socket": "dry-run-cri-socket",
+ },
},
}
}
@@ -680,3 +734,79 @@ users:
}
return getConfigMap(metav1.NamespaceSystem, constants.KubeProxyConfigMap, data)
}
+ // getCoreDNSConfigMap returns a fake "coredns" ConfigMap.
+ func getCoreDNSConfigMap() *corev1.ConfigMap {
+ data := map[string]string{
+ "Corefile": "",
+ }
+ return getConfigMap(metav1.NamespaceSystem, constants.CoreDNSConfigMap, data)
+ }
+ // getPod returns a fake Pod.
+ func getPod(name, nodeName string) corev1.Pod {
+ return corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name + "-" + nodeName,
+ Namespace: metav1.NamespaceSystem,
+ Labels: map[string]string{
+ "component": name,
+ "tier": constants.ControlPlaneTier,
+ },
+ Annotations: map[string]string{
+ constants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "127.0.0.1:6443",
+ },
+ },
+ Spec: corev1.PodSpec{
+ NodeName: nodeName,
+ Containers: []corev1.Container{
+ {
+ Name: name,
+ Image: "registry.k8s.io/" + name + ":v1.1.1",
+ },
+ },
+ },
+ Status: corev1.PodStatus{
+ Phase: corev1.PodRunning,
+ },
+ }
+ }
+ // getPodList returns a list of fake pods.
+ func getPodList(nodeName string) *corev1.PodList {
+ return &corev1.PodList{
+ Items: []corev1.Pod{
+ getPod(constants.KubeAPIServer, nodeName),
+ getPod(constants.KubeControllerManager, nodeName),
+ getPod(constants.KubeScheduler, nodeName),
+ getPod(constants.Etcd, nodeName),
+ },
+ }
+ }
+ // getDeploymentList returns a fake list of deployments.
+ func getDeploymentList() *appsv1.DeploymentList {
+ return &appsv1.DeploymentList{
+ Items: []appsv1.Deployment{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceSystem,
+ Labels: map[string]string{
+ "k8s-app": "kube-dns",
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "registry.k8s.io/coredns/coredns:" + constants.CoreDNSVersion,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ }

View File

@@ -315,6 +315,35 @@ func TestReactors(t *testing.T) {
},
},
},
+ {
+ name: "GetCoreDNSConfigReactor",
+ setup: func(d *DryRun) {
+ d.PrependReactor((d.GetCoreDNSConfigReactor()))
+ },
+ apiCall: func(d *DryRun, namespace, name string) error {
+ obj, err := d.FakeClient().CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+ expectedObj := getCoreDNSConfigMap()
+ if diff := cmp.Diff(expectedObj, obj); diff != "" {
+ return errors.Errorf("object differs (-want,+got):\n%s", diff)
+ }
+ return nil
+ },
+ apiCallCases: []apiCallCase{
+ {
+ name: "foo",
+ namespace: "bar",
+ expectedError: true,
+ },
+ {
+ name: "coredns",
+ namespace: metav1.NamespaceSystem,
+ expectedError: false,
+ },
+ },
+ },
{
name: "DeleteBootstrapTokenReactor",
setup: func(d *DryRun) {
@@ -340,6 +369,89 @@
},
},
},
+ {
+ name: "GetKubeadmCertsReactor",
+ setup: func(d *DryRun) {
+ d.PrependReactor((d.GetKubeadmCertsReactor()))
+ },
+ apiCall: func(d *DryRun, namespace, name string) error {
+ obj, err := d.FakeClient().CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+ expectedObj := getKubeadmCertsSecret()
+ if diff := cmp.Diff(expectedObj, obj); diff != "" {
+ return errors.Errorf("object differs (-want,+got):\n%s", diff)
+ }
+ return nil
+ },
+ apiCallCases: []apiCallCase{
+ {
+ name: "foo",
+ namespace: "bar",
+ expectedError: true,
+ },
+ {
+ name: "kubeadm-certs",
+ namespace: metav1.NamespaceSystem,
+ expectedError: false,
+ },
+ },
+ },
+ {
+ name: "ListPodsReactor",
+ setup: func(d *DryRun) {
+ d.PrependReactor((d.ListPodsReactor("foo")))
+ },
+ apiCall: func(d *DryRun, namespace, name string) error {
+ obj, err := d.FakeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return err
+ }
+ expectedObj := getPodList("foo")
+ if diff := cmp.Diff(expectedObj, obj); diff != "" {
+ return errors.Errorf("object differs (-want,+got):\n%s", diff)
+ }
+ return nil
+ },
+ apiCallCases: []apiCallCase{
+ {
+ namespace: "bar",
+ expectedError: true,
+ },
+ {
+ namespace: metav1.NamespaceSystem,
+ expectedError: false,
+ },
+ },
+ },
+ {
+ name: "ListDeploymentsReactor",
+ setup: func(d *DryRun) {
+ d.PrependReactor((d.ListDeploymentsReactor()))
+ },
+ apiCall: func(d *DryRun, namespace, name string) error {
+ obj, err := d.FakeClient().AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return err
+ }
+ expectedObj := getDeploymentList()
+ if diff := cmp.Diff(expectedObj, obj); diff != "" {
+ return errors.Errorf("object differs (-want,+got):\n%s", diff)
+ }
+ return nil
+ },
+ apiCallCases: []apiCallCase{
+ {
+ namespace: "bar",
+ expectedError: true,
+ },
+ {
+ namespace: metav1.NamespaceSystem,
+ expectedError: false,
+ },
+ },
+ },
}
for _, tc := range tests {

View File

@@ -27,7 +27,6 @@ import (
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
netutil "k8s.io/apimachinery/pkg/util/net"
@@ -48,8 +47,6 @@ type Waiter interface {
WaitForAPI() error
// WaitForPodsWithLabel waits for Pods in the kube-system namespace to become Ready
WaitForPodsWithLabel(kvLabel string) error
- // WaitForPodToDisappear waits for the given Pod in the kube-system namespace to be deleted
- WaitForPodToDisappear(staticPodName string) error
// WaitForStaticPodSingleHash fetches sha256 hash for the control plane static pod
WaitForStaticPodSingleHash(nodeName string, component string) (string, error)
// WaitForStaticPodHashChange waits for the given static pod component's static pod hash to get updated.
@@ -228,20 +225,6 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {
})
}
- // WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
- func (w *KubeWaiter) WaitForPodToDisappear(podName string) error {
- return wait.PollUntilContextTimeout(context.Background(),
- constants.KubernetesAPICallRetryInterval, w.timeout,
- true, func(_ context.Context) (bool, error) {
- _, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), podName, metav1.GetOptions{})
- if err != nil && apierrors.IsNotFound(err) {
- fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName)
- return true, nil
- }
- return false, nil
- })
- }
// WaitForKubelet blocks until the kubelet /healthz endpoint returns 'ok'.
func (w *KubeWaiter) WaitForKubelet(healthzAddress string, healthzPort int32) error {
var (

View File

@@ -26,6 +26,7 @@ import (
"github.com/pkg/errors"
+ authv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
@@ -33,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
+ nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -42,6 +44,7 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/cmd/kubeadm/app/util/config/strict"
+ "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/output"
)
@@ -122,12 +125,45 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
return initcfg, nil
}
+ // GetNodeName uses 3 different approaches for getting the node name.
+ // First it attempts to construct a client from the given kubeconfig file
+ // and get the SelfSubjectReview review for it - i.e. like "kubectl auth whoami".
+ // If that fails it attempt to parse the kubeconfig client certificate subject.
+ // Finally, it falls back to using the host name, which might not always be correct
+ // due to node name overrides.
+ func GetNodeName(kubeconfigFile string) (string, error) {
+ var (
+ nodeName string
+ err error
+ )
+ if kubeconfigFile != "" {
+ client, err := kubeconfig.ClientSetFromFile(kubeconfigFile)
+ if err == nil {
+ ssr, err := client.AuthenticationV1().SelfSubjectReviews().
+ Create(context.Background(), &authv1.SelfSubjectReview{}, metav1.CreateOptions{})
+ if err == nil && ssr.Status.UserInfo.Username != "" {
+ return ssr.Status.UserInfo.Username, nil
+ }
+ }
+ nodeName, err = getNodeNameFromKubeletConfig(kubeconfigFile)
+ if err == nil {
+ return nodeName, nil
+ }
+ }
+ nodeName, err = nodeutil.GetHostname("")
+ if err != nil {
+ return "", errors.Wrapf(err, "could not get node name")
+ }
+ return nodeName, nil
+ }
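
The first tier of GetNodeName is the programmatic equivalent of "kubectl auth whoami". A minimal sketch of just that call, assuming a reachable cluster with SelfSubjectReview available (v1 as of Kubernetes 1.28) and an illustrative kubeconfig path; for a kubelet credential the returned username typically has the form system:node:<nodeName>:

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative path; kubeadm uses the kubelet.conf location here.
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Ask the API server who this credential authenticates as.
	ssr, err := client.AuthenticationV1().SelfSubjectReviews().
		Create(context.Background(), &authv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ssr.Status.UserInfo.Username)
}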
// GetNodeRegistration returns the nodeRegistration for the current node
func GetNodeRegistration(kubeconfigFile string, client clientset.Interface, nodeRegistration *kubeadmapi.NodeRegistrationOptions) error {
// gets the name of the current node
- nodeName, err := getNodeNameFromKubeletConfig(kubeconfigFile)
+ nodeName, err := GetNodeName(kubeconfigFile)
if err != nil {
- return errors.Wrap(err, "failed to get node name from kubelet config")
+ return err
}
// gets the corresponding node and retrieves attributes stored there.

View File

@@ -106,12 +106,6 @@ func (w *Waiter) WaitForPodsWithLabel(kvLabel string) error {
return nil
}
- // WaitForPodToDisappear just returns a dummy nil, to indicate that the program should just proceed
- func (w *Waiter) WaitForPodToDisappear(podName string) error {
- fmt.Printf("[dryrun] Would wait for the %q Pod in the %s namespace to be deleted\n", podName, metav1.NamespaceSystem)
- return nil
- }
// WaitForKubelet blocks until the kubelet /healthz endpoint returns 'ok'
func (w *Waiter) WaitForKubelet(healthzAddress string, healthzPort int32) error {
fmt.Printf("[dryrun] Would make sure the kubelet returns 'ok' at http://%s:%d/healthz\n", healthzAddress, healthzPort)