Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Commit 31b8c0d23d: Merge pull request #87656 from ereslibre/do-not-depend-on-cluster-status

kubeadm: deprecate the `ClusterStatus` dependency
@@ -9,6 +9,7 @@ load(
go_library(
    name = "go_default_library",
    srcs = [
        "apiendpoint.go",
        "bootstraptokenhelpers.go",
        "bootstraptokenstring.go",
        "doc.go",
@@ -52,6 +53,7 @@ filegroup(
go_test(
    name = "go_default_test",
    srcs = [
        "apiendpoint_test.go",
        "bootstraptokenhelpers_test.go",
        "bootstraptokenstring_test.go",
    ],
cmd/kubeadm/app/apis/kubeadm/apiendpoint.go (new file, 44 lines)
@@ -0,0 +1,44 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeadm

import (
	"net"
	"strconv"

	"github.com/pkg/errors"
)

// APIEndpointFromString returns an APIEndpoint struct based on a "host:port" raw string.
func APIEndpointFromString(apiEndpoint string) (APIEndpoint, error) {
	apiEndpointHost, apiEndpointPortStr, err := net.SplitHostPort(apiEndpoint)
	if err != nil {
		return APIEndpoint{}, errors.Wrapf(err, "invalid advertise address endpoint: %s", apiEndpoint)
	}
	apiEndpointPort, err := net.LookupPort("tcp", apiEndpointPortStr)
	if err != nil {
		return APIEndpoint{}, errors.Wrapf(err, "invalid advertise address endpoint port: %s", apiEndpointPortStr)
	}
	return APIEndpoint{
		AdvertiseAddress: apiEndpointHost,
		BindPort:         int32(apiEndpointPort),
	}, nil
}

func (endpoint *APIEndpoint) String() string {
	return net.JoinHostPort(endpoint.AdvertiseAddress, strconv.FormatInt(int64(endpoint.BindPort), 10))
}
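A quick usage sketch of the new helper (not part of the commit; the import alias and values are illustrative). APIEndpointFromString parses a "host:port" string, including bracketed IPv6 hosts, and String performs the inverse via net.JoinHostPort:

package main

import (
	"fmt"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)

func main() {
	// Parse a bracketed IPv6 advertise address together with its bind port.
	ep, err := kubeadmapi.APIEndpointFromString("[2001:db8::1]:6443")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.AdvertiseAddress) // 2001:db8::1
	fmt.Println(ep.String())         // [2001:db8::1]:6443
}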
cmd/kubeadm/app/apis/kubeadm/apiendpoint_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeadm

import (
	"reflect"
	"testing"
)

func TestAPIEndpointFromString(t *testing.T) {
	var tests = []struct {
		apiEndpoint      string
		expectedEndpoint APIEndpoint
		expectedErr      bool
	}{
		{apiEndpoint: "1.2.3.4:1234", expectedEndpoint: APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234}},
		{apiEndpoint: "1.2.3.4:-1", expectedErr: true},
		{apiEndpoint: "1.2.::1234", expectedErr: true},
		{apiEndpoint: "1.2.3.4:65536", expectedErr: true},
		{apiEndpoint: "[::1]:1234", expectedEndpoint: APIEndpoint{AdvertiseAddress: "::1", BindPort: 1234}},
		{apiEndpoint: "[::1]:-1", expectedErr: true},
		{apiEndpoint: "[::1]:65536", expectedErr: true},
		{apiEndpoint: "[::1:1234", expectedErr: true},
	}
	for _, rt := range tests {
		t.Run(rt.apiEndpoint, func(t *testing.T) {
			apiEndpoint, err := APIEndpointFromString(rt.apiEndpoint)
			if (err != nil) != rt.expectedErr {
				t.Errorf("expected error %v, got %v, error: %v", rt.expectedErr, err != nil, err)
			}
			if !reflect.DeepEqual(apiEndpoint, rt.expectedEndpoint) {
				t.Errorf("expected API endpoint: %v; got: %v", rt.expectedEndpoint, apiEndpoint)
			}
		})
	}
}
@@ -18,6 +18,7 @@ go_library(
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
        "//vendor/k8s.io/utils/net:go_default_library",
@@ -29,6 +29,7 @@ import (
	"github.com/pkg/errors"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	utilnet "k8s.io/utils/net"
@@ -370,6 +371,16 @@ const (
	// May be overridden by a flag at startup.
	KubeControllerManagerPort = 10257

	// EtcdAdvertiseClientUrlsAnnotationKey is the annotation key on every etcd pod, describing the
	// advertise client URLs
	EtcdAdvertiseClientUrlsAnnotationKey = "kubeadm.kubernetes.io/etcd.advertise-client-urls"
	// KubeAPIServerAdvertiseAddressEndpointAnnotationKey is the annotation key on every apiserver pod,
	// describing the API endpoint (advertise address and bind port of the api server)
	KubeAPIServerAdvertiseAddressEndpointAnnotationKey = "kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint"

	// ControlPlaneTier is the value used in the tier label to identify control plane components
	ControlPlaneTier = "control-plane"

	// Mode* constants were copied from pkg/kubeapiserver/authorizer/modes
	// to avoid kubeadm dependency on the internal module
	// TODO: share Mode* constants in component config
@@ -433,6 +444,15 @@ var (
	// KubeadmCertsClusterRoleName sets the name for the ClusterRole that allows
	// the bootstrap tokens to access the kubeadm-certs Secret during the join of a new control-plane
	KubeadmCertsClusterRoleName = fmt.Sprintf("kubeadm:%s", KubeadmCertsSecret)

	// StaticPodMirroringDefaultRetry is used as a backoff strategy for
	// waiting for static pods to be mirrored to the apiserver.
	StaticPodMirroringDefaultRetry = wait.Backoff{
		Steps:    30,
		Duration: 1 * time.Second,
		Factor:   1.0,
		Jitter:   0.1,
	}
)

// EtcdSupportedVersion returns officially supported version of etcd for a specific Kubernetes release
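With Steps: 30, Duration: 1s, Factor: 1.0 and Jitter: 0.1, this backoff polls roughly once per second for up to about 30 seconds. A minimal sketch of how a caller might wait on it (the condition function is illustrative, not from this commit):

package sketch

import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// waitForMirrorPod polls the supplied condition with the kubeadm default retry
// used for static pod mirroring; it returns the condition's error or a timeout.
func waitForMirrorPod(podIsMirrored wait.ConditionFunc) error {
	return wait.ExponentialBackoff(constants.StaticPodMirroringDefaultRetry, podIsMirrored)
}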
@@ -59,7 +59,8 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap
			LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/healthz", int(endpoint.BindPort), v1.URISchemeHTTPS),
			Resources:     staticpodutil.ComponentResources("250m"),
			Env:           kubeadmutil.GetProxyEnvVars(),
		}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer)),
		}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer),
			map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: endpoint.String()}),
		kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{
			Name:            kubeadmconstants.KubeControllerManager,
			Image:           images.GetKubernetesImage(kubeadmconstants.KubeControllerManager, cfg),
@@ -69,7 +70,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap
			LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeControllerManagerPort, v1.URISchemeHTTPS),
			Resources:     staticpodutil.ComponentResources("200m"),
			Env:           kubeadmutil.GetProxyEnvVars(),
		}, mounts.GetVolumes(kubeadmconstants.KubeControllerManager)),
		}, mounts.GetVolumes(kubeadmconstants.KubeControllerManager), nil),
		kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{
			Name:            kubeadmconstants.KubeScheduler,
			Image:           images.GetKubernetesImage(kubeadmconstants.KubeScheduler, cfg),
@@ -79,7 +80,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap
			LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetSchedulerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeSchedulerPort, v1.URISchemeHTTPS),
			Resources:     staticpodutil.ComponentResources("100m"),
			Env:           kubeadmutil.GetProxyEnvVars(),
		}, mounts.GetVolumes(kubeadmconstants.KubeScheduler)),
		}, mounts.GetVolumes(kubeadmconstants.KubeScheduler), nil),
	}
	return staticPodSpecs
}
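The extra trailing arguments at the three call sites above suggest that staticpodutil.ComponentPod now accepts an annotations map in addition to the container and volumes. A hypothetical, self-contained sketch of what such a helper does (the real implementation lives in cmd/kubeadm/app/util/staticpod and is not shown in this diff):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// componentPodWithAnnotations is illustrative only: it shows where the annotations
// end up, namely on the static pod's ObjectMeta, so that node-local facts such as
// the API server's advertise endpoint travel with the mirror pod into the API server.
func componentPodWithAnnotations(container v1.Container, volumes []v1.Volume, annotations map[string]string) v1.Pod {
	return v1.Pod{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{
			Name:        container.Name,
			Namespace:   metav1.NamespaceSystem,
			Labels:      map[string]string{"component": container.Name, "tier": "control-plane"},
			Annotations: annotations,
		},
		Spec: v1.PodSpec{
			Containers:  []v1.Container{container},
			Volumes:     volumes,
			HostNetwork: true,
		},
	}
}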
@@ -181,18 +181,23 @@ func GetEtcdPodSpec(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.A
	}
	// probeHostname returns the correct localhost IP address family based on the endpoint AdvertiseAddress
	probeHostname, probePort, probeScheme := staticpodutil.GetEtcdProbeEndpoint(&cfg.Etcd, utilsnet.IsIPv6String(endpoint.AdvertiseAddress))
	return staticpodutil.ComponentPod(v1.Container{
		Name:            kubeadmconstants.Etcd,
		Command:         getEtcdCommand(cfg, endpoint, nodeName, initialCluster),
		Image:           images.GetEtcdImage(cfg),
		ImagePullPolicy: v1.PullIfNotPresent,
		// Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
		VolumeMounts: []v1.VolumeMount{
			staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.Local.DataDir, false),
			staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir+"/etcd", false),
	return staticpodutil.ComponentPod(
		v1.Container{
			Name:            kubeadmconstants.Etcd,
			Command:         getEtcdCommand(cfg, endpoint, nodeName, initialCluster),
			Image:           images.GetEtcdImage(cfg),
			ImagePullPolicy: v1.PullIfNotPresent,
			// Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
			VolumeMounts: []v1.VolumeMount{
				staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.Local.DataDir, false),
				staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir+"/etcd", false),
			},
			LivenessProbe: staticpodutil.LivenessProbe(probeHostname, "/health", probePort, probeScheme),
		},
		LivenessProbe: staticpodutil.LivenessProbe(probeHostname, "/health", probePort, probeScheme),
	}, etcdMounts)
		etcdMounts,
		// etcd will listen on the advertise address of the API server, in a different port (2379)
		map[string]string{kubeadmconstants.EtcdAdvertiseClientUrlsAnnotationKey: etcdutil.GetClientURL(endpoint)},
	)
}

// getEtcdCommand builds the right etcd command from the given config object
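The annotation value written here comes from etcdutil.GetClientURL, which is not part of this excerpt. A hedged sketch of what it plausibly computes, based on the comment above (etcd serving its client API on the advertise address at port 2379):

package sketch

import (
	"fmt"
	"net"
)

// clientURL is illustrative: it builds https://<advertise-address>:2379, letting
// net.JoinHostPort add brackets around IPv6 addresses. The real helper lives in
// cmd/kubeadm/app/util/etcd and may differ in detail.
func clientURL(advertiseAddress string) string {
	return fmt.Sprintf("https://%s", net.JoinHostPort(advertiseAddress, "2379"))
}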
@@ -31,8 +31,10 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
@@ -60,14 +62,18 @@ go_test(
        "//cmd/kubeadm/app/componentconfigs:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util:go_default_library",
        "//cmd/kubeadm/app/util/apiclient:go_default_library",
        "//cmd/kubeadm/test/resources:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//vendor/github.com/lithammer/dedent:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
        "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library",
        "//vendor/sigs.k8s.io/yaml:go_default_library",
    ],
@@ -29,6 +29,8 @@ import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	certutil "k8s.io/client-go/util/cert"
@@ -39,6 +41,25 @@ import (
	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// unretriableError is an error used temporarily while we are migrating from the
// ClusterStatus struct to Pod-annotation-based information. When performing
// the upgrade of all control plane nodes with `kubeadm upgrade apply` and
// `kubeadm upgrade node`, we don't want to retry as if we were hitting connectivity
// issues when the pod annotation is missing on the API server pods. This error is
// used in such a scenario to fail fast and fall back to ClusterStatus retrieval.
type unretriableError struct {
	err error
}

func newUnretriableError(err error) *unretriableError {
	return &unretriableError{err: err}
}

func (ue *unretriableError) Error() string {
	return fmt.Sprintf("unretriable error: %s", ue.err.Error())
}

// FetchInitConfigurationFromCluster fetches configuration from a ConfigMap in the cluster
func FetchInitConfigurationFromCluster(client clientset.Interface, w io.Writer, logPrefix string, newControlPlane bool) (*kubeadmapi.InitConfiguration, error) {
	fmt.Fprintf(w, "[%s] Reading configuration from the cluster...\n", logPrefix)
@@ -90,8 +111,8 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
		if err := getNodeRegistration(kubeconfigDir, client, &initcfg.NodeRegistration); err != nil {
			return nil, errors.Wrap(err, "failed to get node registration")
		}
		// gets the APIEndpoint for the current node from the ClusterStatus in the kubeadm-config ConfigMap
		if err := getAPIEndpoint(configMap.Data, initcfg.NodeRegistration.Name, &initcfg.LocalAPIEndpoint); err != nil {
		// gets the APIEndpoint for the current node
		if err := getAPIEndpoint(client, initcfg.NodeRegistration.Name, &initcfg.LocalAPIEndpoint); err != nil {
			return nil, errors.Wrap(err, "failed to getAPIEndpoint")
		}
	} else {
@@ -181,25 +202,92 @@ func getNodeNameFromKubeletConfig(kubeconfigDir string) (string, error) {
	return strings.TrimPrefix(cert.Subject.CommonName, constants.NodesUserPrefix), nil
}

// getAPIEndpoint returns the APIEndpoint for the current node
func getAPIEndpoint(data map[string]string, nodeName string, apiEndpoint *kubeadmapi.APIEndpoint) error {
	// gets the ClusterStatus from kubeadm-config
	clusterStatus, err := UnmarshalClusterStatus(data)
func getAPIEndpoint(client clientset.Interface, nodeName string, apiEndpoint *kubeadmapi.APIEndpoint) error {
	return getAPIEndpointWithBackoff(client, nodeName, apiEndpoint, constants.StaticPodMirroringDefaultRetry)
}

func getAPIEndpointWithBackoff(client clientset.Interface, nodeName string, apiEndpoint *kubeadmapi.APIEndpoint, backoff wait.Backoff) error {
	var err error
	var errs []error

	if err = getAPIEndpointFromPodAnnotation(client, nodeName, apiEndpoint, backoff); err == nil {
		return nil
	}
	errs = append(errs, errors.WithMessagef(err, "could not retrieve API endpoints for node %q using pod annotations", nodeName))

	// NB: this is a fallback when there is no annotation found in the API server pod that contains
	// the API endpoint, and so we fallback to reading the ClusterStatus struct present in the
	// kubeadm-config ConfigMap. This can happen for example, when performing the first
	// `kubeadm upgrade apply` and `kubeadm upgrade node` cycle on the whole cluster. This logic
	// will be removed when the cluster status struct is removed from the kubeadm-config ConfigMap.
	if err = getAPIEndpointFromClusterStatus(client, nodeName, apiEndpoint); err == nil {
		return nil
	}
	errs = append(errs, errors.WithMessagef(err, "could not retrieve API endpoints for node %q using cluster status", nodeName))

	return errorsutil.NewAggregate(errs)
}

func getAPIEndpointFromPodAnnotation(client clientset.Interface, nodeName string, apiEndpoint *kubeadmapi.APIEndpoint, backoff wait.Backoff) error {
	var rawAPIEndpoint string
	var lastErr error
	// Let's tolerate some unexpected transient failures from the API server or load balancers. Also, if
	// static pods were not yet mirrored into the API server we want to wait for this propagation.
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		rawAPIEndpoint, lastErr = getRawAPIEndpointFromPodAnnotationWithoutRetry(client, nodeName)
		// TODO (ereslibre): this logic will need tweaking once that we get rid of the ClusterStatus, since we won't have
		// the ClusterStatus safety net, we will want to remove the UnretriableError and not make the distinction here
		// anymore.
		if _, ok := lastErr.(*unretriableError); ok {
			// Fail fast scenario, to be removed once we get rid of the ClusterStatus
			return true, errors.Wrapf(lastErr, "API server Pods exist, but no API endpoint annotations were found")
		}
		return lastErr == nil, nil
	})
	if err != nil {
		return err
	}

	// gets the APIEndpoint for the current machine from the ClusterStatus
	e, ok := clusterStatus.APIEndpoints[nodeName]
	if !ok {
		return errors.New("failed to get APIEndpoint information for this node")
	parsedAPIEndpoint, err := kubeadmapi.APIEndpointFromString(rawAPIEndpoint)
	if err != nil {
		return errors.Wrapf(err, "could not parse API endpoint for node %q", nodeName)
	}

	apiEndpoint.AdvertiseAddress = e.AdvertiseAddress
	apiEndpoint.BindPort = e.BindPort
	*apiEndpoint = parsedAPIEndpoint
	return nil
}

func getRawAPIEndpointFromPodAnnotationWithoutRetry(client clientset.Interface, nodeName string) (string, error) {
	podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(
		context.TODO(),
		metav1.ListOptions{
			FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName),
			LabelSelector: fmt.Sprintf("component=%s,tier=%s", constants.KubeAPIServer, constants.ControlPlaneTier),
		},
	)
	if err != nil {
		return "", errors.Wrap(err, "could not retrieve list of pods to determine api server endpoints")
	}
	if len(podList.Items) != 1 {
		return "", errors.Errorf("API server pod for node name %q has %d entries, only one was expected", nodeName, len(podList.Items))
	}
	if apiServerEndpoint, ok := podList.Items[0].Annotations[constants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey]; ok {
		return apiServerEndpoint, nil
	}
	return "", newUnretriableError(errors.Errorf("API server pod for node name %q hasn't got a %q annotation, cannot retrieve API endpoint", nodeName, constants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey))
}

// TODO: remove after 1.20, when the ClusterStatus struct is removed from the kubeadm-config ConfigMap.
func getAPIEndpointFromClusterStatus(client clientset.Interface, nodeName string, apiEndpoint *kubeadmapi.APIEndpoint) error {
	clusterStatus, err := GetClusterStatus(client)
	if err != nil {
		return errors.Wrap(err, "could not retrieve cluster status")
	}
	if statusAPIEndpoint, ok := clusterStatus.APIEndpoints[nodeName]; ok {
		*apiEndpoint = statusAPIEndpoint
		return nil
	}
	return errors.Errorf("could not find node %s in the cluster status", nodeName)
}

// GetClusterStatus returns the kubeadm cluster status read from the kubeadm-config ConfigMap
func GetClusterStatus(client clientset.Interface) (*kubeadmapi.ClusterStatus, error) {
	configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap)
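The two fallback failures above are combined with k8s.io/apimachinery's error aggregation; a minimal sketch of that mechanism in isolation (the messages are illustrative):

package main

import (
	"fmt"

	"github.com/pkg/errors"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	var errs []error
	errs = append(errs, errors.New(`could not retrieve API endpoints for node "node-0" using pod annotations`))
	errs = append(errs, errors.New(`could not retrieve API endpoints for node "node-0" using cluster status`))
	// NewAggregate folds both causes into a single error whose message lists each
	// underlying failure, so the caller can see why both lookup paths failed.
	fmt.Println(errorsutil.NewAggregate(errs))
}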
@@ -21,18 +21,27 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"testing"

	"github.com/pkg/errors"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/version"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/apimachinery/pkg/util/wait"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
	testresources "k8s.io/kubernetes/cmd/kubeadm/test/resources"
)

var k8sVersionString = kubeadmconstants.MinimumControlPlaneVersion.String()
@@ -47,18 +56,6 @@ kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: ` + k8sVersionString + `
`),
	"ClusterStatus_v1beta1": []byte(`
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterStatus
apiEndpoints:
  ` + nodeName + `:
    advertiseAddress: 1.2.3.4
    bindPort: 1234
`),
	"ClusterStatus_v1beta1_Without_APIEndpoints": []byte(`
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterStatus
`),
	"InitConfiguration_v1beta2": []byte(`
apiVersion: kubeadm.k8s.io/v1beta2
@@ -68,18 +65,6 @@ kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: ` + k8sVersionString + `
`),
	"ClusterStatus_v1beta2": []byte(`
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus
apiEndpoints:
  ` + nodeName + `:
    advertiseAddress: 1.2.3.4
    bindPort: 1234
`),
	"ClusterStatus_v1beta2_Without_APIEndpoints": []byte(`
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus
`),
	"Kube-proxy_componentconfig": []byte(`
apiVersion: kubeproxy.config.k8s.io/v1alpha1
@@ -373,82 +358,156 @@ func TestGetNodeRegistration(t *testing.T) {
	}
}

func TestGetAPIEndpoint(t *testing.T) {
func TestGetAPIEndpointWithBackoff(t *testing.T) {
	var tests = []struct {
		name          string
		configMap     fakeConfigMap
		expectedError bool
		name             string
		nodeName         string
		staticPod        *testresources.FakeStaticPod
		configMap        *testresources.FakeConfigMap
		expectedEndpoint *kubeadmapi.APIEndpoint
		expectedErr      bool
	}{
		{
			name: "valid v1beta1",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{
					kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta1"]),
			name:        "no pod annotations; no ClusterStatus",
			nodeName:    nodeName,
			expectedErr: true,
		},
		{
			name:     "valid ipv4 endpoint in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234",
				},
			},
			expectedEndpoint: &kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
		},
		{
			name: "invalid v1beta1 - No ClusterStatus in kubeadm-config ConfigMap",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{},
			},
			expectedError: true,
		},
		{
			name: "invalid v1beta1 - ClusterStatus without APIEndopoints",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{
					kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta1_Without_APIEndpoints"]),
			name:     "invalid ipv4 endpoint in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3::1234",
				},
			},
			expectedError: true,
			expectedErr: true,
		},
		{
			name: "valid v1beta2",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{
					kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta2"]),
			name:     "invalid negative port with ipv4 address in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:-1234",
				},
			},
			expectedErr: true,
		},
		{
			name: "invalid v1beta2 - No ClusterStatus in kubeadm-config ConfigMap",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{},
			},
			expectedError: true,
		},
		{
			name: "invalid v1beta2 - ClusterStatus without APIEndopoints",
			configMap: fakeConfigMap{
				name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
				data: map[string]string{
					kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta2_Without_APIEndpoints"]),
			name:     "invalid high port with ipv4 address in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:65536",
				},
			},
			expectedError: true,
			expectedErr: true,
		},
		{
			name:     "valid ipv6 endpoint in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "[::1]:1234",
				},
			},
			expectedEndpoint: &kubeadmapi.APIEndpoint{AdvertiseAddress: "::1", BindPort: 1234},
		},
		{
			name:     "invalid ipv6 endpoint in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "[::1:1234",
				},
			},
			expectedErr: true,
		},
		{
			name:     "invalid negative port with ipv6 address in pod annotation; no ClusterStatus",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "[::1]:-1234",
				},
			},
			expectedErr: true,
		},
		{
			name:     "invalid high port with ipv6 address in pod annotation",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "[::1]:65536",
				},
			},
			expectedErr: true,
		},
		{
			name:             "no pod annotations; ClusterStatus with valid ipv4 endpoint",
			nodeName:         nodeName,
			configMap:        testresources.ClusterStatusWithAPIEndpoint(nodeName, kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234}),
			expectedEndpoint: &kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
		},
		{
			name:     "invalid ipv4 endpoint in pod annotation; ClusterStatus with valid ipv4 endpoint",
			nodeName: nodeName,
			staticPod: &testresources.FakeStaticPod{
				Component: kubeadmconstants.KubeAPIServer,
				Annotations: map[string]string{
					kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3::1234",
				},
			},
			configMap:        testresources.ClusterStatusWithAPIEndpoint(nodeName, kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234}),
			expectedEndpoint: &kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
		},
	}

	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			cfg := &kubeadmapi.InitConfiguration{}
			err := getAPIEndpoint(rt.configMap.data, nodeName, &cfg.LocalAPIEndpoint)
			if rt.expectedError != (err != nil) {
				t.Errorf("unexpected return err from getInitConfigurationFromCluster: %v", err)
				return
			client := clientsetfake.NewSimpleClientset()
			if rt.staticPod != nil {
				rt.staticPod.NodeName = rt.nodeName
				if err := rt.staticPod.Create(client); err != nil {
					t.Error("could not create static pod")
					return
				}
			}
			if rt.expectedError {
			if rt.configMap != nil {
				if err := rt.configMap.Create(client); err != nil {
					t.Error("could not create ConfigMap")
					return
				}
			}
			apiEndpoint := kubeadm.APIEndpoint{}
			err := getAPIEndpointWithBackoff(client, rt.nodeName, &apiEndpoint, wait.Backoff{Duration: 0, Jitter: 0, Steps: 1})
			if err != nil && !rt.expectedErr {
				t.Errorf("got error %q; was expecting no errors", err)
				return
			} else if err == nil && rt.expectedErr {
				t.Error("got no error; was expecting an error")
				return
			}

			if cfg.LocalAPIEndpoint.AdvertiseAddress != "1.2.3.4" || cfg.LocalAPIEndpoint.BindPort != 1234 {
				t.Errorf("invalid cfg.APIEndpoint")
			if rt.expectedEndpoint != nil && !reflect.DeepEqual(apiEndpoint, *rt.expectedEndpoint) {
				t.Errorf("expected API endpoint: %v; got %v", rt.expectedEndpoint, apiEndpoint)
			}
		})
	}
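These tests rely on FakeStaticPod and FakeConfigMap helpers from cmd/kubeadm/test/resources, which are not included in this excerpt. A hypothetical sketch of roughly what FakeStaticPod.Create has to do for the production code above to find the pod (field names mirror the usage in the tests; the details are assumptions):

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// fakeStaticPod mirrors the fields the tests set on testresources.FakeStaticPod.
type fakeStaticPod struct {
	NodeName    string
	Component   string
	Annotations map[string]string
}

// Create registers a pod in kube-system carrying the component/tier labels and the
// annotations that getAPIEndpointFromPodAnnotation filters on and reads back.
func (p *fakeStaticPod) Create(client clientset.Interface) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        p.Component + "-" + p.NodeName,
			Namespace:   metav1.NamespaceSystem,
			Labels:      map[string]string{"component": p.Component, "tier": "control-plane"},
			Annotations: p.Annotations,
		},
		Spec: v1.PodSpec{NodeName: p.NodeName},
	}
	_, err := client.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), pod, metav1.CreateOptions{})
	return err
}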
@@ -465,7 +524,8 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
		name            string
		fileContents    []byte
		node            *v1.Node
		configMaps      []fakeConfigMap
		staticPods      []testresources.FakeStaticPod
		configMaps      []testresources.FakeConfigMap
		newControlPlane bool
		expectedError   bool
	}{
@@ -475,33 +535,41 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
		},
		{
			name: "invalid - No ClusterConfiguration in kubeadm-config ConfigMap",
			configMaps: []fakeConfigMap{
			configMaps: []testresources.FakeConfigMap{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					data: map[string]string{},
					Name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					Data: map[string]string{},
				},
			},
			expectedError: true,
		},
		{
			name: "valid v1beta1 - new control plane == false", // InitConfiguration composed with data from different places, with also node specific information from ClusterStatus and node
			configMaps: []fakeConfigMap{
			staticPods: []testresources.FakeStaticPod{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					data: map[string]string{
					NodeName:  nodeName,
					Component: kubeadmconstants.KubeAPIServer,
					Annotations: map[string]string{
						kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234",
					},
				},
			},
			configMaps: []testresources.FakeConfigMap{
				{
					Name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					Data: map[string]string{
						kubeadmconstants.ClusterConfigurationConfigMapKey: string(cfgFiles["ClusterConfiguration_v1beta1"]),
						kubeadmconstants.ClusterStatusConfigMapKey:        string(cfgFiles["ClusterStatus_v1beta1"]),
					},
				},
				{
					name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeProxyConfigMapKey: string(cfgFiles["Kube-proxy_componentconfig"]),
					},
				},
				{
					name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(cfgFiles["Kubelet_componentconfig"]),
					},
				},
@@ -521,22 +589,31 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
		},
		{
			name: "valid v1beta1 - new control plane == true", // InitConfiguration composed with data from different places, without node specific information
			configMaps: []fakeConfigMap{
			staticPods: []testresources.FakeStaticPod{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					data: map[string]string{
					NodeName:  nodeName,
					Component: kubeadmconstants.KubeAPIServer,
					Annotations: map[string]string{
						kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234",
					},
				},
			},
			configMaps: []testresources.FakeConfigMap{
				{
					Name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					Data: map[string]string{
						kubeadmconstants.ClusterConfigurationConfigMapKey: string(cfgFiles["ClusterConfiguration_v1beta1"]),
					},
				},
				{
					name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeProxyConfigMapKey: string(cfgFiles["Kube-proxy_componentconfig"]),
					},
				},
				{
					name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(cfgFiles["Kubelet_componentconfig"]),
					},
				},
@@ -545,23 +622,31 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
		},
		{
			name: "valid v1beta2 - new control plane == false", // InitConfiguration composed with data from different places, with also node specific information from ClusterStatus and node
			configMaps: []fakeConfigMap{
			staticPods: []testresources.FakeStaticPod{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					data: map[string]string{
					NodeName:  nodeName,
					Component: kubeadmconstants.KubeAPIServer,
					Annotations: map[string]string{
						kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234",
					},
				},
			},
			configMaps: []testresources.FakeConfigMap{
				{
					Name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					Data: map[string]string{
						kubeadmconstants.ClusterConfigurationConfigMapKey: string(cfgFiles["ClusterConfiguration_v1beta2"]),
						kubeadmconstants.ClusterStatusConfigMapKey:        string(cfgFiles["ClusterStatus_v1beta2"]),
					},
				},
				{
					name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeProxyConfigMapKey: string(cfgFiles["Kube-proxy_componentconfig"]),
					},
				},
				{
					name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(cfgFiles["Kubelet_componentconfig"]),
					},
				},
@@ -581,22 +666,31 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
		},
		{
			name: "valid v1beta2 - new control plane == true", // InitConfiguration composed with data from different places, without node specific information
			configMaps: []fakeConfigMap{
			staticPods: []testresources.FakeStaticPod{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					data: map[string]string{
					NodeName:  nodeName,
					Component: kubeadmconstants.KubeAPIServer,
					Annotations: map[string]string{
						kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234",
					},
				},
			},
			configMaps: []testresources.FakeConfigMap{
				{
					Name: kubeadmconstants.KubeadmConfigConfigMap, // ClusterConfiguration from kubeadm-config.
					Data: map[string]string{
						kubeadmconstants.ClusterConfigurationConfigMapKey: string(cfgFiles["ClusterConfiguration_v1beta2"]),
					},
				},
				{
					name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.KubeProxyConfigMap, // Kube-proxy component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeProxyConfigMapKey: string(cfgFiles["Kube-proxy_componentconfig"]),
					},
				},
				{
					name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					data: map[string]string{
					Name: kubeadmconstants.GetKubeletConfigMapName(k8sVersion), // Kubelet component config from corresponding ConfigMap.
					Data: map[string]string{
						kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(cfgFiles["Kubelet_componentconfig"]),
					},
				},
@@ -626,10 +720,18 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
				}
			}

			for _, c := range rt.configMaps {
				err := c.create(client)
			for _, p := range rt.staticPods {
				err := p.Create(client)
				if err != nil {
					t.Errorf("couldn't create ConfigMap %s", c.name)
					t.Errorf("couldn't create pod for nodename %s", p.NodeName)
					return
				}
			}

			for _, c := range rt.configMaps {
				err := c.Create(client)
				if err != nil {
					t.Errorf("couldn't create ConfigMap %s", c.Name)
					return
				}
			}
@@ -651,7 +753,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
				t.Errorf("invalid ClusterConfiguration.KubernetesVersion")
			}
			if !rt.newControlPlane && (cfg.LocalAPIEndpoint.AdvertiseAddress != "1.2.3.4" || cfg.LocalAPIEndpoint.BindPort != 1234) {
				t.Errorf("invalid cfg.LocalAPIEndpoint")
				t.Errorf("invalid cfg.LocalAPIEndpoint: %v", cfg.LocalAPIEndpoint)
			}
			if _, ok := cfg.ComponentConfigs[componentconfigs.KubeletGroup]; !ok {
				t.Errorf("no cfg.ComponentConfigs[%q]", componentconfigs.KubeletGroup)
@@ -665,55 +767,51 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {

func TestGetGetClusterStatus(t *testing.T) {
	var tests = []struct {
		name              string
		configMaps        []fakeConfigMap
		expectedEndpoints int
		expectedError     bool
		name          string
		configMaps    []testresources.FakeConfigMap
		expectedError bool
	}{
		{
			name:              "invalid missing config map",
			expectedEndpoints: 0,
			name: "invalid missing config map",
		},
		{
			name: "valid v1beta1",
			configMaps: []fakeConfigMap{
			configMaps: []testresources.FakeConfigMap{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap,
					data: map[string]string{
					Name: kubeadmconstants.KubeadmConfigConfigMap,
					Data: map[string]string{
						kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta1"]),
					},
				},
			},
			expectedEndpoints: 1,
		},
		{
			name: "valid v1beta2",
			configMaps: []fakeConfigMap{
			configMaps: []testresources.FakeConfigMap{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap,
					data: map[string]string{
					Name: kubeadmconstants.KubeadmConfigConfigMap,
					Data: map[string]string{
						kubeadmconstants.ClusterStatusConfigMapKey: string(cfgFiles["ClusterStatus_v1beta2"]),
					},
				},
			},
			expectedEndpoints: 1,
		},
		{
			name: "invalid missing ClusterStatusConfigMapKey in the config map",
			configMaps: []fakeConfigMap{
			configMaps: []testresources.FakeConfigMap{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap,
					data: map[string]string{},
					Name: kubeadmconstants.KubeadmConfigConfigMap,
					Data: map[string]string{},
				},
			},
			expectedError: true,
		},
		{
			name: "invalid wrong value in the config map",
			configMaps: []fakeConfigMap{
			configMaps: []testresources.FakeConfigMap{
				{
					name: kubeadmconstants.KubeadmConfigConfigMap,
					data: map[string]string{
					Name: kubeadmconstants.KubeadmConfigConfigMap,
					Data: map[string]string{
						kubeadmconstants.ClusterStatusConfigMapKey: "not a kubeadm type",
					},
				},
@@ -727,14 +825,14 @@ func TestGetGetClusterStatus(t *testing.T) {
			client := clientsetfake.NewSimpleClientset()

			for _, c := range rt.configMaps {
				err := c.create(client)
				err := c.Create(client)
				if err != nil {
					t.Errorf("couldn't create ConfigMap %s", c.name)
					t.Errorf("couldn't create ConfigMap %s", c.Name)
					return
				}
			}

			clusterStatus, err := GetClusterStatus(client)
			_, err := GetClusterStatus(client)
			if rt.expectedError != (err != nil) {
				t.Errorf("unexpected return err from GetClusterStatus: %v", err)
				return
@@ -742,26 +840,193 @@ func TestGetGetClusterStatus(t *testing.T) {
			if rt.expectedError {
				return
			}
		})
	}
}
			// Test expected values in clusterStatus
			if len(clusterStatus.APIEndpoints) != rt.expectedEndpoints {
				t.Errorf("unexpected ClusterStatus return value")
func TestGetAPIEndpointFromPodAnnotation(t *testing.T) {
	var tests = []struct {
		name             string
		nodeName         string
		pods             []testresources.FakeStaticPod
		clientSetup      func(*clientsetfake.Clientset)
		expectedEndpoint kubeadmapi.APIEndpoint
		expectedErr      bool
	}{
		{
			name:     "exactly one pod with annotation",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
			},
			expectedEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
		},
		{
			name:        "no pods with annotation",
			nodeName:    nodeName,
			expectedErr: true,
		},
		{
			name:     "exactly one pod with annotation; all requests fail",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
			},
			clientSetup: func(clientset *clientsetfake.Clientset) {
				clientset.PrependReactor("list", "pods", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, nil, apierrors.NewInternalError(errors.New("API server down"))
				})
			},
			expectedErr: true,
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			client := clientsetfake.NewSimpleClientset()
			for i, pod := range rt.pods {
				pod.NodeName = rt.nodeName
				if err := pod.CreateWithPodSuffix(client, strconv.Itoa(i)); err != nil {
					t.Errorf("error setting up test creating pod for node %q", pod.NodeName)
					return
				}
			}
			if rt.clientSetup != nil {
				rt.clientSetup(client)
			}
			apiEndpoint := kubeadmapi.APIEndpoint{}
			err := getAPIEndpointFromPodAnnotation(client, rt.nodeName, &apiEndpoint, wait.Backoff{Duration: 0, Jitter: 0, Steps: 1})
			if err != nil && !rt.expectedErr {
				t.Errorf("got error %v, but wasn't expecting any error", err)
				return
			} else if err == nil && rt.expectedErr {
				t.Error("didn't get any error; but was expecting an error")
				return
			} else if err != nil && rt.expectedErr {
				return
			}
			if !reflect.DeepEqual(apiEndpoint, rt.expectedEndpoint) {
				t.Errorf("expected API endpoint: %v; got %v", rt.expectedEndpoint, apiEndpoint)
			}
		})
	}
}

type fakeConfigMap struct {
	name string
	data map[string]string
}

func (c *fakeConfigMap) create(client clientset.Interface) error {
	return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      c.name,
			Namespace: metav1.NamespaceSystem,
func TestGetRawAPIEndpointFromPodAnnotationWithoutRetry(t *testing.T) {
	var tests = []struct {
		name             string
		nodeName         string
		pods             []testresources.FakeStaticPod
		clientSetup      func(*clientsetfake.Clientset)
		expectedEndpoint string
		expectedErr      bool
	}{
		{
			name:        "no pods",
			nodeName:    nodeName,
			expectedErr: true,
		},
		Data: c.data,
	})
		{
			name:     "exactly one pod with annotation",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
			},
			expectedEndpoint: "1.2.3.4:1234",
		},
		{
			name:     "two pods: one with annotation, one missing annotation",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
				{
					Component: constants.KubeAPIServer,
				},
			},
			expectedErr: true,
		},
		{
			name:     "two pods: different annotations",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.5:1234"},
				},
			},
			expectedErr: true,
		},
		{
			name:     "two pods: both missing annotation",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component: constants.KubeAPIServer,
				},
				{
					Component: constants.KubeAPIServer,
				},
			},
			expectedErr: true,
		},
		{
			name:     "exactly one pod with annotation; request fails",
			nodeName: nodeName,
			pods: []testresources.FakeStaticPod{
				{
					Component:   constants.KubeAPIServer,
					Annotations: map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: "1.2.3.4:1234"},
				},
			},
			clientSetup: func(clientset *clientsetfake.Clientset) {
				clientset.PrependReactor("list", "pods", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, nil, apierrors.NewInternalError(errors.New("API server down"))
				})
			},
			expectedErr: true,
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			client := clientsetfake.NewSimpleClientset()
			for i, pod := range rt.pods {
				pod.NodeName = rt.nodeName
				if err := pod.CreateWithPodSuffix(client, strconv.Itoa(i)); err != nil {
					t.Errorf("error setting up test creating pod for node %q", pod.NodeName)
					return
				}
			}
			if rt.clientSetup != nil {
				rt.clientSetup(client)
			}
			endpoint, err := getRawAPIEndpointFromPodAnnotationWithoutRetry(client, rt.nodeName)
			if err != nil && !rt.expectedErr {
				t.Errorf("got error %v, but wasn't expecting any error", err)
				return
			} else if err == nil && rt.expectedErr {
				t.Error("didn't get any error; but was expecting an error")
				return
			} else if err != nil && rt.expectedErr {
				return
			}
			if endpoint != rt.expectedEndpoint {
				t.Errorf("expected API endpoint: %v; got: %v", rt.expectedEndpoint, endpoint)
			}
		})
	}
}
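Both test functions above simulate API-server failures by prepending a reactor to the fake clientset; a standalone sketch of that pattern (the error text is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	client := clientsetfake.NewSimpleClientset()
	// Every "list pods" call now fails, which is how the tests exercise the retry
	// and fail-fast paths without a real API server.
	client.PrependReactor("list", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	_, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
	fmt.Println(err) // prints the internal error injected by the reactor
}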
@@ -9,6 +9,7 @@ go_library(
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/util/config:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
@@ -26,6 +27,13 @@ go_test(
    deps = [
        "//cmd/kubeadm/app/apis/kubeadm:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/test/resources:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)
@@ -19,6 +19,7 @@ package etcd
import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/url"
	"path/filepath"
@@ -30,6 +31,7 @@ import (
	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/pkg/transport"
	"google.golang.org/grpc"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"
@ -86,24 +88,17 @@ func New(endpoints []string, ca, cert, key string) (*Client, error) {
return &client, nil
}

// NewFromCluster creates an etcd client for the etcd endpoints defined in the ClusterStatus value stored in
// the kubeadm-config ConfigMap in kube-system namespace.
// Once created, the client synchronizes client's endpoints with the known endpoints from the etcd membership API (reality check).
// NewFromCluster creates an etcd client for the etcd endpoints present in etcd member list. In order to compose this information,
// it will first discover at least one etcd endpoint to connect to. Once created, the client synchronizes client's endpoints with
// the known endpoints from the etcd membership API, since it is the authoritative source of truth for the list of available members.
func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client, error) {
// etcd is listening the API server advertise address on each control-plane node
// so it is necessary to get the list of endpoints from kubeadm cluster status before connecting
// Discover at least one etcd endpoint to connect to by inspecting the existing etcd pods

// Gets the cluster status
clusterStatus, err := config.GetClusterStatus(client)
// Get the list of etcd endpoints
endpoints, err := getEtcdEndpoints(client)
if err != nil {
return nil, err
}

// Get the list of etcd endpoints from cluster status
endpoints := []string{}
for _, e := range clusterStatus.APIEndpoints {
endpoints = append(endpoints, GetClientURLByIP(e.AdvertiseAddress))
}
klog.V(1).Infof("etcd endpoints read from pods: %s", strings.Join(endpoints, ","))

// Creates an etcd client
@ -127,6 +122,95 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client
return etcdClient, nil
}

// getEtcdEndpoints returns the list of etcd endpoints.
func getEtcdEndpoints(client clientset.Interface) ([]string, error) {
return getEtcdEndpointsWithBackoff(client, constants.StaticPodMirroringDefaultRetry)
}

func getEtcdEndpointsWithBackoff(client clientset.Interface, backoff wait.Backoff) ([]string, error) {
etcdEndpoints, err := getRawEtcdEndpointsFromPodAnnotation(client, backoff)
if err != nil {
// NB: this is a fallback when there is no annotation found in the etcd pods that contains
// the client URL, and so we fallback to reading the ClusterStatus struct present in the
// kubeadm-config ConfigMap. This can happen for example, when performing the first
// `kubeadm upgrade apply`. This logic will be removed when the cluster status struct
// is removed from the kubeadm-config ConfigMap.
return getRawEtcdEndpointsFromClusterStatus(client)
}
return etcdEndpoints, nil
}

// getRawEtcdEndpointsFromPodAnnotation returns the list of endpoints as reported on etcd's pod annotations using the given backoff
func getRawEtcdEndpointsFromPodAnnotation(client clientset.Interface, backoff wait.Backoff) ([]string, error) {
etcdEndpoints := []string{}
var lastErr error
// Let's tolerate some unexpected transient failures from the API server or load balancers. Also, if
// static pods were not yet mirrored into the API server we want to wait for this propagation.
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
var overallEtcdPodCount int
if etcdEndpoints, overallEtcdPodCount, lastErr = getRawEtcdEndpointsFromPodAnnotationWithoutRetry(client); lastErr != nil {
return false, nil
}
// TODO (ereslibre): this logic will need tweaking once that we get rid of the ClusterStatus, since we won't have
// the ClusterStatus safety net we will have to retry in both cases.
if len(etcdEndpoints) == 0 {
if overallEtcdPodCount == 0 {
return false, nil
}
// Fail fast scenario, to be removed once we get rid of the ClusterStatus
return true, errors.New("etcd Pods exist, but no etcd endpoint annotations were found")
}
return true, nil
})
if err != nil {
if lastErr != nil {
return []string{}, errors.Wrap(lastErr, "could not retrieve the list of etcd endpoints")
}
return []string{}, errors.Wrap(err, "could not retrieve the list of etcd endpoints")
}
return etcdEndpoints, nil
}

// getRawEtcdEndpointsFromPodAnnotationWithoutRetry returns the list of etcd endpoints as reported by etcd Pod annotations,
// along with the number of global etcd pods. This allows for callers to tell the difference between "no endpoints found",
// and "no endpoints found and pods were listed", so they can skip retrying.
func getRawEtcdEndpointsFromPodAnnotationWithoutRetry(client clientset.Interface) ([]string, int, error) {
klog.V(3).Infof("retrieving etcd endpoints from %q annotation in etcd Pods", constants.EtcdAdvertiseClientUrlsAnnotationKey)
podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(
context.TODO(),
metav1.ListOptions{
LabelSelector: fmt.Sprintf("component=%s,tier=%s", constants.Etcd, constants.ControlPlaneTier),
},
)
if err != nil {
return []string{}, 0, err
}
etcdEndpoints := []string{}
for _, pod := range podList.Items {
etcdEndpoint, ok := pod.ObjectMeta.Annotations[constants.EtcdAdvertiseClientUrlsAnnotationKey]
if !ok {
klog.V(3).Infof("etcd Pod %q is missing the %q annotation; cannot infer etcd advertise client URL using the Pod annotation", pod.ObjectMeta.Name, constants.EtcdAdvertiseClientUrlsAnnotationKey)
continue
}
etcdEndpoints = append(etcdEndpoints, etcdEndpoint)
}
return etcdEndpoints, len(podList.Items), nil
}

// TODO: remove after 1.20, when the ClusterStatus struct is removed from the kubeadm-config ConfigMap.
func getRawEtcdEndpointsFromClusterStatus(client clientset.Interface) ([]string, error) {
klog.V(3).Info("retrieving etcd endpoints from the cluster status")
clusterStatus, err := config.GetClusterStatus(client)
if err != nil {
return []string{}, err
}
etcdEndpoints := []string{}
for _, e := range clusterStatus.APIEndpoints {
etcdEndpoints = append(etcdEndpoints, GetClientURLByIP(e.AdvertiseAddress))
}
return etcdEndpoints, nil
}

// dialTimeout is the timeout for failing to establish a connection.
// It is set to >20 seconds as times shorter than that will cause TLS connections to fail
// on heavily loaded arm64 CPUs (issue #64649)

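Editor's note: the hunk above replaces the ClusterStatus lookup with a Pod-annotation lookup plus a temporary ClusterStatus fallback. The following is a minimal, illustrative sketch of that discovery flow outside of kubeadm; it is not part of this commit. The helper name discoverEtcdEndpoints and the literal annotation key value are assumptions made for the example.

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// Assumed value of constants.EtcdAdvertiseClientUrlsAnnotationKey; illustrative only.
const etcdAdvertiseClientURLsAnnotation = "kubeadm.kubernetes.io/etcd.advertise-client-urls"

// discoverEtcdEndpoints lists the etcd static Pods in kube-system and collects the
// client URL each Pod advertises through its annotation, mirroring the lookup that
// getRawEtcdEndpointsFromPodAnnotationWithoutRetry performs above.
func discoverEtcdEndpoints(client clientset.Interface) ([]string, error) {
	pods, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(
		context.TODO(),
		metav1.ListOptions{LabelSelector: "component=etcd,tier=control-plane"},
	)
	if err != nil {
		return nil, err
	}
	endpoints := []string{}
	for _, pod := range pods.Items {
		if url, ok := pod.Annotations[etcdAdvertiseClientURLsAnnotation]; ok {
			endpoints = append(endpoints, url)
		}
	}
	if len(endpoints) == 0 {
		// At this point the real code falls back to the ClusterStatus stored in the
		// kubeadm-config ConfigMap, until that struct is removed.
		return nil, fmt.Errorf("no etcd endpoints found in Pod annotations")
	}
	return endpoints, nil
}
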
@ -18,11 +18,19 @@ package etcd

import (
"fmt"
"reflect"
"strconv"
"testing"

"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
clientsetfake "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
testresources "k8s.io/kubernetes/cmd/kubeadm/test/resources"
)

func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.APIEndpoint) string, port int) {
@ -106,3 +114,217 @@ func TestGetClientURLByIP(t *testing.T) {
}
}
}

func TestGetEtcdEndpointsWithBackoff(t *testing.T) {
var tests = []struct {
name string
pods []testresources.FakeStaticPod
configMap *testresources.FakeConfigMap
expectedEndpoints []string
expectedErr bool
}{
{
name: "no pod annotations; no ClusterStatus",
expectedEndpoints: []string{},
},
{
name: "ipv4 endpoint in pod annotation; no ClusterStatus; port is preserved",
pods: []testresources.FakeStaticPod{
{
Component: constants.Etcd,
Annotations: map[string]string{
constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:1234",
},
},
},
expectedEndpoints: []string{"https://1.2.3.4:1234"},
},
{
name: "no pod annotations; ClusterStatus with valid ipv4 endpoint; port is inferred",
configMap: testresources.ClusterStatusWithAPIEndpoint("cp-0", kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 1234}),
expectedEndpoints: []string{"https://1.2.3.4:2379"},
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
for _, pod := range rt.pods {
if err := pod.Create(client); err != nil {
t.Errorf("error setting up test creating pod for node %q", pod.NodeName)
}
}
if rt.configMap != nil {
if err := rt.configMap.Create(client); err != nil {
t.Error("could not create ConfigMap")
}
}
endpoints, err := getEtcdEndpointsWithBackoff(client, wait.Backoff{Duration: 0, Jitter: 0, Steps: 1})
if err != nil && !rt.expectedErr {
t.Errorf("got error %q; was expecting no errors", err)
return
} else if err == nil && rt.expectedErr {
t.Error("got no error; was expecting an error")
return
} else if err != nil && rt.expectedErr {
return
}

if !reflect.DeepEqual(endpoints, rt.expectedEndpoints) {
t.Errorf("expected etcd endpoints: %v; got: %v", rt.expectedEndpoints, endpoints)
}
})
}
}

func TestGetRawEtcdEndpointsFromPodAnnotation(t *testing.T) {
var tests = []struct {
name string
pods []testresources.FakeStaticPod
clientSetup func(*clientsetfake.Clientset)
expectedEndpoints []string
expectedErr bool
}{
{
name: "exactly one pod with annotation",
pods: []testresources.FakeStaticPod{
{
NodeName: "cp-0",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
},
},
expectedEndpoints: []string{"https://1.2.3.4:2379"},
},
{
name: "no pods with annotation",
expectedErr: true,
},
{
name: "exactly one pod with annotation; all requests fail",
pods: []testresources.FakeStaticPod{
{
NodeName: "cp-0",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
},
},
clientSetup: func(clientset *clientsetfake.Clientset) {
clientset.PrependReactor("list", "pods", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
},
expectedErr: true,
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
for i, pod := range rt.pods {
if err := pod.CreateWithPodSuffix(client, strconv.Itoa(i)); err != nil {
t.Errorf("error setting up test creating pod for node %q", pod.NodeName)
}
}
if rt.clientSetup != nil {
rt.clientSetup(client)
}
endpoints, err := getRawEtcdEndpointsFromPodAnnotation(client, wait.Backoff{Duration: 0, Jitter: 0, Steps: 1})
if err != nil && !rt.expectedErr {
t.Errorf("got error %v, but wasn't expecting any error", err)
return
} else if err == nil && rt.expectedErr {
t.Error("didn't get any error; but was expecting an error")
return
} else if err != nil && rt.expectedErr {
return
}
if !reflect.DeepEqual(endpoints, rt.expectedEndpoints) {
t.Errorf("expected etcd endpoints: %v; got: %v", rt.expectedEndpoints, endpoints)
}
})
}
}

func TestGetRawEtcdEndpointsFromPodAnnotationWithoutRetry(t *testing.T) {
var tests = []struct {
name string
pods []testresources.FakeStaticPod
clientSetup func(*clientsetfake.Clientset)
expectedEndpoints []string
expectedErr bool
}{
{
name: "no pods",
expectedEndpoints: []string{},
},
{
name: "exactly one pod with annotation",
pods: []testresources.FakeStaticPod{
{
NodeName: "cp-0",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
},
},
expectedEndpoints: []string{"https://1.2.3.4:2379"},
},
{
name: "two pods with annotation",
pods: []testresources.FakeStaticPod{
{
NodeName: "cp-0",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
},
{
NodeName: "cp-1",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.5:2379"},
},
},
expectedEndpoints: []string{"https://1.2.3.4:2379", "https://1.2.3.5:2379"},
},
{
name: "exactly one pod with annotation; request fails",
pods: []testresources.FakeStaticPod{
{
NodeName: "cp-0",
Component: constants.Etcd,
Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
},
},
clientSetup: func(clientset *clientsetfake.Clientset) {
clientset.PrependReactor("list", "pods", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
},
expectedErr: true,
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
for _, pod := range rt.pods {
if err := pod.Create(client); err != nil {
t.Errorf("error setting up test creating pod for node %q", pod.NodeName)
return
}
}
if rt.clientSetup != nil {
rt.clientSetup(client)
}
endpoints, _, err := getRawEtcdEndpointsFromPodAnnotationWithoutRetry(client)
if err != nil && !rt.expectedErr {
t.Errorf("got error %v, but wasn't expecting any error", err)
return
} else if err == nil && rt.expectedErr {
t.Error("didn't get any error; but was expecting an error")
return
} else if err != nil && rt.expectedErr {
return
}
if !reflect.DeepEqual(endpoints, rt.expectedEndpoints) {
t.Errorf("expected etcd endpoints: %v; got: %v", rt.expectedEndpoints, endpoints)
}
})
}
}

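Editor's note: the failure cases above rely on client-go's fake clientset and its reactor chain to simulate API server errors. A minimal sketch of that pattern, not part of this commit; the helper name newFailingClient is illustrative.

package sketch

import (
	"errors"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

// newFailingClient returns a fake clientset whose "list pods" requests always fail
// with an internal error, the same setup the clientSetup hooks use above to exercise
// the error paths of the endpoint discovery helpers.
func newFailingClient() *clientsetfake.Clientset {
	client := clientsetfake.NewSimpleClientset()
	client.PrependReactor("list", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	return client
}
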
@ -45,8 +45,8 @@ const (
kubeSchedulerBindAddressArg = "bind-address"
)

// ComponentPod returns a Pod object from the container and volume specifications
func ComponentPod(container v1.Container, volumes map[string]v1.Volume) v1.Pod {
// ComponentPod returns a Pod object from the container, volume and annotations specifications
func ComponentPod(container v1.Container, volumes map[string]v1.Volume, annotations map[string]string) v1.Pod {
return v1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
@ -57,7 +58,8 @@ func ComponentPod(container v1.Container, volumes map[string]v1.Volume) v1.Pod {
Namespace: metav1.NamespaceSystem,
// The component and tier labels are useful for quickly identifying the control plane Pods when doing a .List()
// against Pods in the kube-system namespace. Can for example be used together with the WaitForPodsWithLabel function
Labels: map[string]string{"component": container.Name, "tier": "control-plane"},
Labels: map[string]string{"component": container.Name, "tier": kubeadmconstants.ControlPlaneTier},
Annotations: annotations,
},
Spec: v1.PodSpec{
Containers: []v1.Container{container},

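Editor's note: with the extra parameter, callers can attach arbitrary annotations to the generated static Pod manifest; the etcd manifest is the case that motivates it, so the advertise-client-urls annotation can later be read back by the discovery code. A hypothetical call site follows; the annotation key value and image reference are assumptions, not taken from this diff.

package sketch

import (
	v1 "k8s.io/api/core/v1"

	staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)

// etcdStaticPod builds an etcd static Pod carrying an advertise-client-urls annotation.
func etcdStaticPod() v1.Pod {
	return staticpodutil.ComponentPod(
		v1.Container{Name: "etcd", Image: "k8s.gcr.io/etcd:3.4.3-0"}, // image reference is a placeholder
		map[string]v1.Volume{},
		map[string]string{
			// assumed annotation key value and endpoint; illustrative only
			"kubeadm.kubernetes.io/etcd.advertise-client-urls": "https://10.0.0.10:2379",
		},
	)
}
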
@ -426,7 +426,7 @@ func TestComponentPod(t *testing.T) {
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
c := v1.Container{Name: rt.name}
actual := ComponentPod(c, map[string]v1.Volume{})
actual := ComponentPod(c, map[string]v1.Volume{}, nil)
if !reflect.DeepEqual(rt.expected, actual) {
t.Errorf(
"failed componentPod:\n\texpected: %v\n\t actual: %v",

@ -33,6 +33,7 @@ filegroup(
":package-srcs",
"//cmd/kubeadm/test/cmd:all-srcs",
"//cmd/kubeadm/test/kubeconfig:all-srcs",
"//cmd/kubeadm/test/resources:all-srcs",
],
tags = ["automanaged"],
)

35
cmd/kubeadm/test/resources/BUILD
Normal file
@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"cluster_status.go",
"configmap.go",
"pods.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/test/resources",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/staticpod:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
41
cmd/kubeadm/test/resources/cluster_status.go
Normal file
@ -0,0 +1,41 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resources

import (
"encoding/json"

kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// ClusterStatusWithAPIEndpoint returns a FakeConfigMap containing a
// cluster status with the provided endpoint for nodeName as a single
// entry
func ClusterStatusWithAPIEndpoint(nodeName string, endpoint kubeadmapi.APIEndpoint) *FakeConfigMap {
marshaledClusterStatus, _ := json.Marshal(kubeadmapi.ClusterStatus{
APIEndpoints: map[string]kubeadmapi.APIEndpoint{
nodeName: endpoint,
},
})
return &FakeConfigMap{
Name: constants.KubeadmConfigConfigMap,
Data: map[string]string{
constants.ClusterStatusConfigMapKey: string(marshaledClusterStatus),
},
}
}
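Editor's note: ClusterStatusWithAPIEndpoint is what the etcd tests above use to emulate the legacy ClusterStatus fallback path. A short usage sketch, not part of this commit; node name and endpoint values are arbitrary.

package sketch

import (
	clientsetfake "k8s.io/client-go/kubernetes/fake"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	testresources "k8s.io/kubernetes/cmd/kubeadm/test/resources"
)

// seedClusterStatus stores a ClusterStatus with a single API endpoint in the
// kubeadm-config ConfigMap of a fake clientset, mirroring the test setup above.
func seedClusterStatus() (*clientsetfake.Clientset, error) {
	client := clientsetfake.NewSimpleClientset()
	cm := testresources.ClusterStatusWithAPIEndpoint("cp-0", kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 6443})
	if err := cm.Create(client); err != nil {
		return nil, err
	}
	return client, nil
}
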
41
cmd/kubeadm/test/resources/configmap.go
Normal file
@ -0,0 +1,41 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resources

import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// FakeConfigMap represents a fake config map
type FakeConfigMap struct {
Name string
Data map[string]string
}

// Create creates a fake configmap using the provided client
func (c *FakeConfigMap) Create(client clientset.Interface) error {
return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: metav1.NamespaceSystem,
},
Data: c.Data,
})
}
66
cmd/kubeadm/test/resources/pods.go
Normal file
@ -0,0 +1,66 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resources

import (
"context"
"fmt"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
)

// FakeStaticPod represents a fake static pod
type FakeStaticPod struct {
NodeName string
Component string
Annotations map[string]string
}

// Pod returns a pod structure representing the fake static pod with a
// given suffix
func (p *FakeStaticPod) Pod(suffix string) *v1.Pod {
pod := staticpodutil.ComponentPod(
v1.Container{
Name: p.Component,
Image: fmt.Sprintf("%s-image:tag", p.Component),
},
map[string]v1.Volume{},
p.Annotations,
)
if len(suffix) > 0 {
pod.ObjectMeta.Name = fmt.Sprintf("%s-%s-%s", p.Component, p.NodeName, suffix)
} else {
pod.ObjectMeta.Name = fmt.Sprintf("%s-%s", p.Component, p.NodeName)
}
pod.Spec.NodeName = p.NodeName
return &pod
}

// Create creates a fake static pod using the provided client
func (p *FakeStaticPod) Create(client clientset.Interface) error {
return p.CreateWithPodSuffix(client, "")
}

// CreateWithPodSuffix creates a fake static pod using the provided
// client and suffix
func (p *FakeStaticPod) CreateWithPodSuffix(client clientset.Interface, suffix string) error {
_, err := client.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), p.Pod(suffix), metav1.CreateOptions{})
return err
}
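Editor's note: a short usage sketch for FakeStaticPod, matching how the etcd tests above seed the fake API server; it is not part of this commit and the endpoint value is arbitrary.

package sketch

import (
	clientsetfake "k8s.io/client-go/kubernetes/fake"

	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	testresources "k8s.io/kubernetes/cmd/kubeadm/test/resources"
)

// seedEtcdPod creates a fake etcd static Pod for node "cp-0" carrying the
// advertise-client-urls annotation that the discovery helpers look for.
func seedEtcdPod() error {
	client := clientsetfake.NewSimpleClientset()
	pod := testresources.FakeStaticPod{
		NodeName:    "cp-0",
		Component:   constants.Etcd,
		Annotations: map[string]string{constants.EtcdAdvertiseClientUrlsAnnotationKey: "https://1.2.3.4:2379"},
	}
	return pod.Create(client)
}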