Merge pull request #78915 from ereslibre/retry-configmap-get-on-unauthorized
kubeadm: Add ability to retry ConfigMap get if certain errors happen
commit 4f29960cb2
@@ -16,6 +16,7 @@ go_library(
         "//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/util:go_default_library",
+        "//cmd/kubeadm/app/util/apiclient:go_default_library",
         "//pkg/kubelet/apis/config:go_default_library",
         "//pkg/kubelet/apis/config/v1beta1:go_default_library",
         "//pkg/kubelet/apis/config/validation:go_default_library",
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/util/version"
     clientset "k8s.io/client-go/kubernetes"
     kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
     kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
     kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
 )
@@ -34,7 +35,7 @@ func GetFromKubeletConfigMap(client clientset.Interface, version *version.Versio

     // Read the ConfigMap from the cluster based on what version the kubelet is
     configMapName := kubeadmconstants.GetKubeletConfigMapName(version)
-    kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
+    kubeletCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName)
     if err != nil {
         return nil, err
     }
@@ -60,7 +61,7 @@ func GetFromKubeletConfigMap(client clientset.Interface, version *version.Versio
 func GetFromKubeProxyConfigMap(client clientset.Interface, version *version.Version) (runtime.Object, error) {

     // Read the ConfigMap from the cluster
-    kubeproxyCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeProxyConfigMap, metav1.GetOptions{})
+    kubeproxyCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, kubeadmconstants.KubeProxyConfigMap)
     if err != nil {
         return nil, err
     }
@@ -24,7 +24,7 @@ import (

     "github.com/pkg/errors"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     rbac "k8s.io/api/rbac/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -130,7 +130,7 @@ func DownloadConfig(client clientset.Interface, kubeletVersion *version.Version,
     fmt.Printf("[kubelet-start] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n",
         configMapName, metav1.NamespaceSystem)

-    kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
+    kubeletCfg, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName)
     // If the ConfigMap wasn't found and the kubelet version is v1.10.x, where we didn't support the config file yet
     // just return, don't error out
     if apierrors.IsNotFound(err) && kubeletVersion.Minor() == 10 {
@@ -21,7 +21,7 @@ import (

     "github.com/pkg/errors"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/version"
     clientset "k8s.io/client-go/kubernetes"
@@ -38,7 +38,7 @@ func EnableDynamicConfigForNode(client clientset.Interface, nodeName string, kub
         nodeName, configMapName, metav1.NamespaceSystem)
     fmt.Println("[kubelet] WARNING: The Dynamic Kubelet Config feature is beta, but off by default. It hasn't been well-tested yet at this stage, use with caution.")

-    _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
+    _, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, configMapName)
     if err != nil {
         return errors.Wrap(err, "couldn't get the kubelet configuration ConfigMap")
     }
@@ -293,3 +293,25 @@ func PatchNode(client clientset.Interface, nodeName string, patchFn func(*v1.Nod
     // then the retries end and the error is returned.
     return wait.Poll(constants.APICallRetryInterval, constants.PatchNodeTimeout, PatchNodeOnce(client, nodeName, patchFn))
 }
+
+// GetConfigMapWithRetry tries to retrieve a ConfigMap using the given client,
+// retrying if we get an unexpected error.
+//
+// TODO: evaluate if this can be done better. Potentially remove the retry if feasible.
+func GetConfigMapWithRetry(client clientset.Interface, namespace, name string) (*v1.ConfigMap, error) {
+    var cm *v1.ConfigMap
+    var lastError error
+    err := wait.ExponentialBackoff(clientsetretry.DefaultBackoff, func() (bool, error) {
+        var err error
+        cm, err = client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+        if err == nil {
+            return true, nil
+        }
+        lastError = err
+        return false, nil
+    })
+    if err == nil {
+        return cm, nil
+    }
+    return nil, lastError
+}
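For illustration only (not part of the change above), here is a minimal sketch of how a caller could exercise the new helper against a fake clientset. The fake client, the ConfigMap name "kubeadm-config", and its data are assumptions made for the example, not taken from the diff.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

func main() {
    // Seed a fake clientset with a ConfigMap in kube-system so the first Get succeeds.
    client := fake.NewSimpleClientset(&v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem},
        Data:       map[string]string{"ClusterConfiguration": "placeholder"},
    })

    // The helper retries the Get on any error and surfaces only the last error
    // if all attempts fail.
    cm, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, "kubeadm-config")
    if err != nil {
        fmt.Println("get failed:", err)
        return
    }
    fmt.Println("got ConfigMap:", cm.Name)
}

Because intermediate failures are swallowed, a caller sees either the ConfigMap or the error from the final attempt, which keeps the error handling at the call sites above unchanged.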
@@ -23,6 +23,7 @@ go_library(
         "//cmd/kubeadm/app/componentconfigs:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/util:go_default_library",
+        "//cmd/kubeadm/app/util/apiclient:go_default_library",
         "//cmd/kubeadm/app/util/config/strict:go_default_library",
         "//cmd/kubeadm/app/util/runtime:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -36,6 +36,7 @@ import (
     kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
     "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+    "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
 )

 // FetchInitConfigurationFromCluster fetches configuration from a ConfigMap in the cluster
@@ -60,7 +61,7 @@ func FetchInitConfigurationFromCluster(client clientset.Interface, w io.Writer,
 // getInitConfigurationFromCluster is separate only for testing purposes, don't call it directly, use FetchInitConfigurationFromCluster instead
 func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Interface, newControlPlane bool) (*kubeadmapi.InitConfiguration, error) {
     // Also, the config map really should be KubeadmConfigConfigMap...
-    configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{})
+    configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap)
     if err != nil {
         return nil, errors.Wrap(err, "failed to get config map")
     }
@@ -211,7 +212,7 @@ func getComponentConfigs(client clientset.Interface, clusterConfiguration *kubea

 // GetClusterStatus returns the kubeadm cluster status read from the kubeadm-config ConfigMap
 func GetClusterStatus(client clientset.Interface) (*kubeadmapi.ClusterStatus, error) {
-    configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{})
+    configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap)
     if apierrors.IsNotFound(err) {
         return &kubeadmapi.ClusterStatus{}, nil
     }
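The retry policy in GetConfigMapWithRetry comes from clientsetretry.DefaultBackoff; that import is not visible in this excerpt, but clientsetretry presumably aliases k8s.io/client-go/util/retry. As a standalone sketch of how wait.ExponentialBackoff drives such a condition function, the following uses made-up backoff values and a condition that succeeds on the third attempt:

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    // Illustrative backoff values, not the ones kubeadm uses.
    backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 5.0, Jitter: 0.1, Steps: 4}

    attempts := 0
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        attempts++
        if attempts < 3 {
            // (false, nil) means "not done yet": sleep and try again, just as
            // GetConfigMapWithRetry does when the ConfigMap Get returns an error.
            return false, nil
        }
        // (true, nil) stops the loop successfully.
        return true, nil
    })
    fmt.Printf("attempts=%d err=%v\n", attempts, err)
}

If the condition never reports done before the steps are exhausted, wait.ExponentialBackoff returns wait.ErrWaitTimeout, which is why the helper tracks lastError and returns the last real error instead of the generic timeout.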