Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-04 01:40:07 +00:00
kubeadm: refactor the dry-run logic
The current dry-run client implementation is suboptimal and sparse. It has the following problems:
- When an object CREATE or UPDATE reaches the default dry-run client, the operation is a no-op, which means subsequent GET calls must fully emulate the object that exists in the store.
- There are multiple implementations of a DryRunGetter interface, such as the one in init_dryrun.go, but there are no implementations for reset, upgrade, and join.
- There is a specific DryRunGetter backed by a real client in clientbacked_dryrun.go, but it is used only for upgrade and does not work in conjunction with a fake client.
This commit makes the following changes:
- Removes all existing *dryrun*.go implementations.
- Adds a new DryRun implementation in dryrun.go that wraps three clients: a fake clientset, a real clientset, and a real dynamic client.
- The DryRun object uses the method-chaining pattern.
- Allows the user to opt in to real clients only when needed, by passing a real kubeconfig. By default, only a fake client is constructed.
- The default reactor chain for the fake client always logs the object action; for GET or LIST actions it then attempts to fetch the object with the real dynamic client, and if a real object does not exist it falls back to the fake object store.
- The user can prepend or append reactors to the chain.
- All reactors known to be needed during init, join, reset, and upgrade are added as methods of the DryRun struct.
- Adds detailed unit tests for the DryRun struct and its methods, including reactors.
Additional changes:
- Uses the new DryRun implementation in all command workflows: init, join, reset, and upgrade.
- Ensures that --dry-run works even if there is no active cluster, by returning faked objects. For join, a faked cluster-info ConfigMap with a fake bootstrap token and CA is used.
parent 5973accf48
commit 30f9893374
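For orientation, the following is a minimal sketch of the method-chaining pattern described above, assembled from the call sites visible in the diff below (getDryRunClient in cmd/kubeadm/app/cmd/init.go and newResetData in cmd/kubeadm/app/cmd/reset.go). The package path and the NewDryRun, WithKubeConfigFile, WithDefaultMarshalFunction, WithWriter, PrependReactor, GetNodeReactor, PatchNodeReactor, and FakeClient names are taken from the diff; the surrounding main function, the kubeconfig path, and the node name are illustrative assumptions only, not part of this commit.

```go
package main

import (
	"context"
	"log"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

func main() {
	dryRun := apiclient.NewDryRun()

	// Opt in to the real (backing) clients only if a kubeconfig is available,
	// mirroring what the reset workflow does; by default only the fake
	// clientset is constructed. The path below is an illustrative assumption.
	kubeconfig := "/etc/kubernetes/admin.conf"
	if _, err := os.Stat(kubeconfig); err == nil {
		if err := dryRun.WithKubeConfigFile(kubeconfig); err != nil {
			log.Fatalf("could not construct real clients from %s: %v", kubeconfig, err)
		}
	}

	// Configure the marshal function, the output writer, and the reactor chain
	// through method chaining, as the init workflow does.
	dryRun.WithDefaultMarshalFunction().
		WithWriter(os.Stdout).
		PrependReactor(dryRun.GetNodeReactor()).
		PrependReactor(dryRun.PatchNodeReactor())

	// Every action against the fake clientset is logged; GET and LIST actions
	// first try the real dynamic client (if configured) and then fall back to
	// the fake object store.
	client := dryRun.FakeClient()
	if _, err := client.CoreV1().Nodes().Get(context.TODO(), "node-1", metav1.GetOptions{}); err != nil {
		log.Printf("dry-run GET failed: %v", err)
	}
}
```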
@@ -28,6 +28,7 @@ import (

"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"

kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
@@ -349,10 +350,7 @@ func newInitData(cmd *cobra.Command, args []string, initOptions *initOptions, ou
// if dry running creates a temporary folder for saving kubeadm generated files
dryRunDir := ""
if initOptions.dryRun || cfg.DryRun {
// the KUBEADM_INIT_DRYRUN_DIR environment variable allows overriding the dry-run temporary
// directory from the command line. This makes it possible to run "kubeadm init" integration
// tests without root.
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm(os.Getenv("KUBEADM_INIT_DRYRUN_DIR"), "kubeadm-init-dryrun"); err != nil {
if dryRunDir, err = kubeadmconstants.GetDryRunDir(kubeadmconstants.EnvVarInitDryRunDir, "kubeadm-init-dryrun", klog.Warningf); err != nil {
return nil, errors.Wrap(err, "couldn't create a temporary directory")
}
}
@@ -502,12 +500,16 @@ func (d *initData) OutputWriter() io.Writer {

// getDryRunClient creates a fake client that answers some GET calls in order to be able to do the full init flow in dry-run mode.
func getDryRunClient(d *initData) (clientset.Interface, error) {
svcSubnetCIDR, err := kubeadmconstants.GetKubernetesServiceCIDR(d.cfg.Networking.ServiceSubnet)
if err != nil {
return nil, errors.Wrapf(err, "unable to get internal Kubernetes Service IP from the given service CIDR (%s)", d.cfg.Networking.ServiceSubnet)
dryRun := apiclient.NewDryRun()
if err := dryRun.WithKubeConfigFile(d.KubeConfigPath()); err != nil {
return nil, err
}
dryRunGetter := apiclient.NewInitDryRunGetter(d.cfg.NodeRegistration.Name, svcSubnetCIDR.String())
return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil
dryRun.WithDefaultMarshalFunction().
WithWriter(os.Stdout).
PrependReactor(dryRun.GetNodeReactor()).
PrependReactor(dryRun.PatchNodeReactor())

return dryRun.FakeClient(), nil
}

// Client returns a Kubernetes client to be used by kubeadm.
@ -45,6 +45,8 @@ import (
|
||||
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/discovery/token"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
|
||||
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
|
||||
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
)
|
||||
@ -464,7 +466,7 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri
|
||||
// if dry running, creates a temporary folder to save kubeadm generated files
|
||||
dryRunDir := ""
|
||||
if opt.dryRun || cfg.DryRun {
|
||||
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-join-dryrun"); err != nil {
|
||||
if dryRunDir, err = kubeadmconstants.GetDryRunDir(kubeadmconstants.EnvVarJoinDryRunDir, "kubeadm-join-dryrun", klog.Warningf); err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create a temporary directory on dryrun")
|
||||
}
|
||||
}
|
||||
@ -535,8 +537,19 @@ func (j *joinData) TLSBootstrapCfg() (*clientcmdapi.Config, error) {
|
||||
if j.tlsBootstrapCfg != nil {
|
||||
return j.tlsBootstrapCfg, nil
|
||||
}
|
||||
|
||||
var (
|
||||
client clientset.Interface
|
||||
err error
|
||||
)
|
||||
if j.dryRun {
|
||||
client, err = j.Client()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create a client for TLS bootstrap")
|
||||
}
|
||||
}
|
||||
klog.V(1).Infoln("[preflight] Discovering cluster-info")
|
||||
tlsBootstrapCfg, err := discovery.For(j.cfg)
|
||||
tlsBootstrapCfg, err := discovery.For(client, j.cfg)
|
||||
j.tlsBootstrapCfg = tlsBootstrapCfg
|
||||
return tlsBootstrapCfg, err
|
||||
}
|
||||
@ -550,19 +563,58 @@ func (j *joinData) InitCfg() (*kubeadmapi.InitConfiguration, error) {
|
||||
return nil, err
|
||||
}
|
||||
klog.V(1).Infoln("[preflight] Fetching init configuration")
|
||||
initCfg, err := fetchInitConfigurationFromJoinConfiguration(j.cfg, j.tlsBootstrapCfg)
|
||||
var client clientset.Interface
|
||||
if j.dryRun {
|
||||
var err error
|
||||
client, err = j.Client()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get dry-run client for fetching InitConfiguration")
|
||||
}
|
||||
}
|
||||
initCfg, err := fetchInitConfigurationFromJoinConfiguration(j.cfg, client, j.tlsBootstrapCfg)
|
||||
j.initCfg = initCfg
|
||||
return initCfg, err
|
||||
}
|
||||
|
||||
// Client returns the Client for accessing the cluster with the identity defined in admin.conf.
|
||||
func (j *joinData) Client() (clientset.Interface, error) {
|
||||
if j.client != nil {
|
||||
pathAdmin := filepath.Join(j.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName)
|
||||
|
||||
if j.dryRun {
|
||||
dryRun := apiclient.NewDryRun()
|
||||
// For the dynamic dry-run client use this kubeconfig only if it exists.
|
||||
// That would happen presumably after TLS bootstrap.
|
||||
if _, err := os.Stat(pathAdmin); err == nil {
|
||||
if err := dryRun.WithKubeConfigFile(pathAdmin); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if j.tlsBootstrapCfg != nil {
|
||||
if err := dryRun.WithKubeConfig(j.tlsBootstrapCfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if j.cfg.Discovery.BootstrapToken != nil {
|
||||
insecureConfig := token.BuildInsecureBootstrapKubeConfig(j.cfg.Discovery.BootstrapToken.APIServerEndpoint)
|
||||
resetConfig, err := clientcmd.NewDefaultClientConfig(*insecureConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig")
|
||||
}
|
||||
if err := dryRun.WithRestConfig(resetConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
dryRun.WithDefaultMarshalFunction().
|
||||
WithWriter(os.Stdout).
|
||||
AppendReactor(dryRun.GetClusterInfoReactor()).
|
||||
AppendReactor(dryRun.GetKubeadmConfigReactor()).
|
||||
AppendReactor(dryRun.GetKubeProxyConfigReactor()).
|
||||
AppendReactor(dryRun.GetKubeletConfigReactor())
|
||||
|
||||
j.client = dryRun.FakeClient()
|
||||
return j.client, nil
|
||||
}
|
||||
path := filepath.Join(j.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName)
|
||||
|
||||
client, err := kubeconfigutil.ClientSetFromFile(path)
|
||||
client, err := kubeconfigutil.ClientSetFromFile(pathAdmin)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "[preflight] couldn't create Kubernetes client")
|
||||
}
|
||||
@ -593,10 +645,18 @@ func (j *joinData) PatchesDir() string {
|
||||
}
|
||||
|
||||
// fetchInitConfigurationFromJoinConfiguration retrieves the init configuration from a join configuration, performing the discovery
|
||||
func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration, tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
|
||||
// Retrieves the kubeadm configuration
|
||||
func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration, client clientset.Interface, tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
|
||||
var err error
|
||||
|
||||
klog.V(1).Infoln("[preflight] Retrieving KubeConfig objects")
|
||||
initConfiguration, err := fetchInitConfiguration(tlsBootstrapCfg)
|
||||
if client == nil {
|
||||
// creates a client to access the cluster using the bootstrap token identity
|
||||
client, err = kubeconfigutil.ToClientSet(tlsBootstrapCfg)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to access the cluster")
|
||||
}
|
||||
}
|
||||
initConfiguration, err := fetchInitConfiguration(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -618,15 +678,8 @@ func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfigurati
|
||||
}
|
||||
|
||||
// fetchInitConfiguration reads the cluster configuration from the kubeadm-admin configMap
|
||||
func fetchInitConfiguration(tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
|
||||
// creates a client to access the cluster using the bootstrap token identity
|
||||
tlsClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to access the cluster")
|
||||
}
|
||||
|
||||
// Fetches the init configuration
|
||||
initConfiguration, err := configutil.FetchInitConfigurationFromCluster(tlsClient, nil, "preflight", true, false)
|
||||
func fetchInitConfiguration(client clientset.Interface) (*kubeadmapi.InitConfiguration, error) {
|
||||
initConfiguration, err := configutil.FetchInitConfigurationFromCluster(client, nil, "preflight", true, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap")
|
||||
}
|
||||
|
@ -30,6 +30,7 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
"k8s.io/klog/v2"
|
||||
@ -150,11 +151,18 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
|
||||
}()
|
||||
}
|
||||
|
||||
// Create the bootstrap client before we possibly overwrite the server address
|
||||
// for ControlPlaneKubeletLocalMode.
|
||||
bootstrapClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg)
|
||||
if err != nil {
|
||||
return errors.Errorf("could not create client from bootstrap kubeconfig")
|
||||
var client clientset.Interface
|
||||
// If dry-use the client from joinData, else create a new bootstrap client
|
||||
if data.DryRun() {
|
||||
client, err = data.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
client, err = kubeconfigutil.ToClientSet(tlsBootstrapCfg)
|
||||
if err != nil {
|
||||
return errors.Errorf("could not create client from bootstrap kubeconfig")
|
||||
}
|
||||
}
|
||||
|
||||
if features.Enabled(initCfg.FeatureGates, features.ControlPlaneKubeletLocalMode) {
|
||||
@ -204,7 +212,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
|
||||
// A new Node with the same name as an existing control-plane Node can cause undefined
|
||||
// behavior and ultimately control-plane failure.
|
||||
klog.V(1).Infof("[kubelet-start] Checking for an existing Node in the cluster with name %q and status %q", nodeName, v1.NodeReady)
|
||||
node, err := bootstrapClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "cannot get Node %q", nodeName)
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ func runCleanupNode(c workflow.RunData) error {
|
||||
klog.Warningln("[reset] Please ensure kubelet is stopped manually")
|
||||
}
|
||||
} else {
|
||||
fmt.Println("[reset] Would stop the kubelet service")
|
||||
fmt.Println("[dryrun] Would stop the kubelet service")
|
||||
}
|
||||
}
|
||||
|
||||
@ -96,7 +96,7 @@ func runCleanupNode(c workflow.RunData) error {
|
||||
dirsToClean = append(dirsToClean, kubeletRunDirectory)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[reset] Would unmount mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory)
|
||||
fmt.Printf("[dryrun] Would unmount mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory)
|
||||
}
|
||||
|
||||
if !r.DryRun() {
|
||||
@ -105,7 +105,7 @@ func runCleanupNode(c workflow.RunData) error {
|
||||
klog.Warningf("[reset] Failed to remove containers: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("[reset] Would remove Kubernetes-managed containers")
|
||||
fmt.Println("[dryrun] Would remove Kubernetes-managed containers")
|
||||
}
|
||||
|
||||
// Remove contents from the config and pki directories
|
||||
@ -115,7 +115,7 @@ func runCleanupNode(c workflow.RunData) error {
|
||||
|
||||
dirsToClean = append(dirsToClean, certsDir)
|
||||
if r.CleanupTmpDir() {
|
||||
tempDir := path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDirForKubeadm)
|
||||
tempDir := path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDir)
|
||||
dirsToClean = append(dirsToClean, tempDir)
|
||||
}
|
||||
resetConfigDir(kubeadmconstants.KubernetesDir, dirsToClean, r.DryRun())
|
||||
@ -127,7 +127,7 @@ func runCleanupNode(c workflow.RunData) error {
|
||||
klog.Warningf("[reset] Failed to remove users and groups: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("[reset] Would remove users and groups created for rootless control-plane")
|
||||
fmt.Println("[dryrun] Would remove users and groups created for rootless control-plane")
|
||||
}
|
||||
}
|
||||
|
||||
@ -156,7 +156,7 @@ func resetConfigDir(configPathDir string, dirsToClean []string, isDryRun bool) {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[reset] Would delete contents of directories: %v\n", dirsToClean)
|
||||
fmt.Printf("[dryrun] Would delete contents of directories: %v\n", dirsToClean)
|
||||
}
|
||||
|
||||
filesToClean := []string{
|
||||
@ -176,7 +176,7 @@ func resetConfigDir(configPathDir string, dirsToClean []string, isDryRun bool) {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[reset] Would delete files: %v\n", filesToClean)
|
||||
fmt.Printf("[dryrun] Would delete files: %v\n", filesToClean)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ func TestConfigDirCleaner(t *testing.T) {
|
||||
dirsToClean := []string{
|
||||
filepath.Join(tmpDir, test.resetDir),
|
||||
filepath.Join(tmpDir, kubeadmconstants.ManifestsSubDirName),
|
||||
filepath.Join(tmpDir, kubeadmconstants.TempDirForKubeadm),
|
||||
filepath.Join(tmpDir, kubeadmconstants.TempDir),
|
||||
}
|
||||
resetConfigDir(tmpDir, dirsToClean, false)
|
||||
|
||||
|
@ -74,8 +74,8 @@ func runRemoveETCDMemberPhase(c workflow.RunData) error {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Println("[reset] Would remove the etcd member on this node from the etcd cluster")
|
||||
fmt.Printf("[reset] Would delete contents of the etcd data directory: %v\n", etcdDataDir)
|
||||
fmt.Println("[dryrun] Would remove the etcd member on this node from the etcd cluster")
|
||||
fmt.Printf("[dryrun] Would delete contents of the etcd data directory: %v\n", etcdDataDir)
|
||||
}
|
||||
}
|
||||
// This could happen if the phase `cleanup-node` is run before the `remove-etcd-member`.
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/lithammer/dedent"
|
||||
@ -39,7 +40,9 @@ import (
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
|
||||
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
|
||||
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
|
||||
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime"
|
||||
)
|
||||
|
||||
@ -104,7 +107,10 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var initCfg *kubeadmapi.InitConfiguration
|
||||
var (
|
||||
initCfg *kubeadmapi.InitConfiguration
|
||||
client clientset.Interface
|
||||
)
|
||||
|
||||
// Either use the config file if specified, or convert public kubeadm API to the internal ResetConfiguration and validates cfg.
|
||||
resetCfg, err := configutil.LoadOrDefaultResetConfiguration(opts.cfgPath, opts.externalcfg, configutil.LoadOrDefaultConfigurationOptions{
|
||||
@ -115,7 +121,21 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err := cmdutil.GetClientSet(opts.kubeconfigPath, false)
|
||||
dryRunFlag := cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.DryRun, resetCfg.DryRun, opts.externalcfg.DryRun).(bool)
|
||||
if dryRunFlag {
|
||||
dryRun := apiclient.NewDryRun().WithDefaultMarshalFunction().WithWriter(os.Stdout)
|
||||
dryRun.AppendReactor(dryRun.GetKubeadmConfigReactor()).
|
||||
AppendReactor(dryRun.GetKubeletConfigReactor()).
|
||||
AppendReactor(dryRun.GetKubeProxyConfigReactor())
|
||||
client = dryRun.FakeClient()
|
||||
_, err = os.Stat(opts.kubeconfigPath)
|
||||
if err == nil {
|
||||
err = dryRun.WithKubeConfigFile(opts.kubeconfigPath)
|
||||
}
|
||||
} else {
|
||||
client, err = kubeconfigutil.ClientSetFromFile(opts.kubeconfigPath)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", opts.kubeconfigPath)
|
||||
initCfg, err = configutil.FetchInitConfigurationFromCluster(client, nil, "reset", false, false)
|
||||
@ -162,7 +182,7 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
|
||||
outputWriter: out,
|
||||
cfg: initCfg,
|
||||
resetCfg: resetCfg,
|
||||
dryRun: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.DryRun, resetCfg.DryRun, opts.externalcfg.DryRun).(bool),
|
||||
dryRun: dryRunFlag,
|
||||
forceReset: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.ForceReset, resetCfg.Force, opts.externalcfg.Force).(bool),
|
||||
cleanupTmpDir: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.CleanupTmpDir, resetCfg.CleanupTmpDir, opts.externalcfg.CleanupTmpDir).(bool),
|
||||
}, nil
|
||||
@ -184,7 +204,7 @@ func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) {
|
||||
)
|
||||
flagSet.BoolVar(
|
||||
&resetOptions.externalcfg.CleanupTmpDir, options.CleanupTmpDir, resetOptions.externalcfg.CleanupTmpDir,
|
||||
fmt.Sprintf("Cleanup the %q directory", path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDirForKubeadm)),
|
||||
fmt.Sprintf("Cleanup the %q directory", path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDir)),
|
||||
)
|
||||
options.AddKubeConfigFlag(flagSet, &resetOptions.kubeconfigPath)
|
||||
options.AddConfigFlag(flagSet, &resetOptions.cfgPath)
|
||||
|
@ -219,6 +219,9 @@ func TestNewResetData(t *testing.T) {
|
||||
resetOptions := newResetOptions()
|
||||
cmd := newCmdReset(nil, nil, resetOptions)
|
||||
|
||||
// make sure all cases use dry-run as we are not constructing a kubeconfig
|
||||
tc.flags[options.DryRun] = "true"
|
||||
|
||||
// sets cmd flags (that will be reflected on the reset options)
|
||||
for f, v := range tc.flags {
|
||||
cmd.Flags().Set(f, v)
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
@ -48,7 +49,9 @@ import (
|
||||
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
|
||||
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
|
||||
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/output"
|
||||
)
|
||||
|
||||
@ -121,7 +124,7 @@ func newCmdToken(out io.Writer, errW io.Writer) *cobra.Command {
|
||||
|
||||
klog.V(1).Infoln("[token] getting Clientsets from kubeconfig file")
|
||||
kubeConfigFile = cmdutil.GetKubeConfigPath(kubeConfigFile)
|
||||
client, err := cmdutil.GetClientSet(kubeConfigFile, dryRun)
|
||||
client, err := getClientForTokenCommands(kubeConfigFile, dryRun)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -153,7 +156,7 @@ func newCmdToken(out io.Writer, errW io.Writer) *cobra.Command {
|
||||
`),
|
||||
RunE: func(tokenCmd *cobra.Command, args []string) error {
|
||||
kubeConfigFile = cmdutil.GetKubeConfigPath(kubeConfigFile)
|
||||
client, err := cmdutil.GetClientSet(kubeConfigFile, dryRun)
|
||||
client, err := getClientForTokenCommands(kubeConfigFile, dryRun)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -187,7 +190,7 @@ func newCmdToken(out io.Writer, errW io.Writer) *cobra.Command {
|
||||
return errors.Errorf("missing argument; 'token delete' is missing token of form %q or %q", bootstrapapi.BootstrapTokenPattern, bootstrapapi.BootstrapTokenIDPattern)
|
||||
}
|
||||
kubeConfigFile = cmdutil.GetKubeConfigPath(kubeConfigFile)
|
||||
client, err := cmdutil.GetClientSet(kubeConfigFile, dryRun)
|
||||
client, err := getClientForTokenCommands(kubeConfigFile, dryRun)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -426,3 +429,17 @@ func RunDeleteTokens(out io.Writer, client clientset.Interface, tokenIDsOrTokens
}
return nil
}

// getClientForTokenCommands returns a client to be used with token commands.
// When dry-running it includes token specific reactors.
func getClientForTokenCommands(file string, dryRun bool) (clientset.Interface, error) {
if dryRun {
dryRun := apiclient.NewDryRun().WithDefaultMarshalFunction().WithWriter(os.Stdout)
dryRun.AppendReactor(dryRun.DeleteBootstrapTokenReactor())
if err := dryRun.WithKubeConfigFile(file); err != nil {
return nil, err
}
return dryRun.FakeClient(), nil
}
return kubeconfigutil.ClientSetFromFile(file)
}
@ -35,7 +35,6 @@ import (
|
||||
kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
|
||||
outputapischeme "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/scheme"
|
||||
outputapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/v1alpha3"
|
||||
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/output"
|
||||
)
|
||||
|
||||
@ -261,7 +260,7 @@ func TestNewCmdToken(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetClientSet(t *testing.T) {
|
||||
func TestGetClientForTokenCommands(t *testing.T) {
|
||||
testConfigTokenFile := "test-config-file"
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "kubeadm-token-test")
|
||||
@ -272,13 +271,13 @@ func TestGetClientSet(t *testing.T) {
|
||||
fullPath := filepath.Join(tmpDir, testConfigTokenFile)
|
||||
|
||||
// test dryRun = false on a non-existing file
|
||||
if _, err = cmdutil.GetClientSet(fullPath, false); err == nil {
|
||||
t.Errorf("GetClientSet(); dry-run: false; did no fail for test file %q: %v", fullPath, err)
|
||||
if _, err = getClientForTokenCommands(fullPath, false); err == nil {
|
||||
t.Errorf("dry-run: false; did no fail for test file %q: %v", fullPath, err)
|
||||
}
|
||||
|
||||
// test dryRun = true on a non-existing file
|
||||
if _, err = cmdutil.GetClientSet(fullPath, true); err == nil {
|
||||
t.Errorf("GetClientSet(); dry-run: true; did no fail for test file %q: %v", fullPath, err)
|
||||
if _, err = getClientForTokenCommands(fullPath, true); err == nil {
|
||||
t.Errorf("dry-run: true; did no fail for test file %q: %v", fullPath, err)
|
||||
}
|
||||
|
||||
f, err := os.Create(fullPath)
|
||||
@ -292,8 +291,8 @@ func TestGetClientSet(t *testing.T) {
|
||||
}
|
||||
|
||||
// test dryRun = true on an existing file
|
||||
if _, err = cmdutil.GetClientSet(fullPath, true); err != nil {
|
||||
t.Errorf("GetClientSet(); dry-run: true; failed for test file %q: %v", fullPath, err)
|
||||
if _, err = getClientForTokenCommands(fullPath, true); err != nil {
|
||||
t.Errorf("dry-run: true; failed for test file %q: %v", fullPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -317,9 +316,9 @@ func TestRunDeleteTokens(t *testing.T) {
|
||||
t.Errorf("Unable to write test file %q: %v", fullPath, err)
|
||||
}
|
||||
|
||||
client, err := cmdutil.GetClientSet(fullPath, true)
|
||||
client, err := getClientForTokenCommands(fullPath, true)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to run GetClientSet() for test file %q: %v", fullPath, err)
|
||||
t.Errorf("unable to create client for test file %q: %v", fullPath, err)
|
||||
}
|
||||
|
||||
// test valid; should not fail
|
||||
|
@ -191,34 +191,32 @@ func runPreflightChecks(client clientset.Interface, ignorePreflightErrors sets.S
|
||||
// getClient gets a real or fake client depending on whether the user is dry-running or not
|
||||
func getClient(file string, dryRun bool) (clientset.Interface, error) {
|
||||
if dryRun {
|
||||
dryRunGetter, err := apiclient.NewClientBackedDryRunGetterFromKubeconfig(file)
|
||||
if err != nil {
|
||||
dryRun := apiclient.NewDryRun()
|
||||
if err := dryRun.WithKubeConfigFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dryRun.WithDefaultMarshalFunction().
|
||||
WithWriter(os.Stdout).
|
||||
PrependReactor(dryRun.HealthCheckJobReactor()).
|
||||
PrependReactor(dryRun.PatchNodeReactor())
|
||||
|
||||
// In order for fakeclient.Discovery().ServerVersion() to return the backing API Server's
|
||||
// real version; we have to do some clever API machinery tricks. First, we get the real
|
||||
// API Server's version
|
||||
realServerVersion, err := dryRunGetter.Client().Discovery().ServerVersion()
|
||||
// API Server's version.
|
||||
realServerVersion, err := dryRun.Client().Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get server version")
|
||||
}
|
||||
|
||||
// Get the fake clientset
|
||||
dryRunOpts := apiclient.GetDefaultDryRunClientOptions(dryRunGetter, os.Stdout)
|
||||
// Print GET and LIST requests
|
||||
dryRunOpts.PrintGETAndLIST = true
|
||||
fakeclient := apiclient.NewDryRunClientWithOpts(dryRunOpts)
|
||||
// As we know the return of Discovery() of the fake clientset is of type *fakediscovery.FakeDiscovery
|
||||
// we can convert it to that struct.
|
||||
fakeclientDiscovery, ok := fakeclient.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
// Obtain the FakeDiscovery object for this fake client.
|
||||
fakeClient := dryRun.FakeClient()
|
||||
fakeClientDiscovery, ok := fakeClient.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
if !ok {
|
||||
return nil, errors.New("couldn't set fake discovery's server version")
|
||||
return nil, errors.New("could not set fake discovery's server version")
|
||||
}
|
||||
// Lastly, set the right server version to be used
|
||||
fakeclientDiscovery.FakedServerVersion = realServerVersion
|
||||
// return the fake clientset used for dry-running
|
||||
return fakeclient, nil
|
||||
// Lastly, set the right server version to be used.
|
||||
fakeClientDiscovery.FakedServerVersion = realServerVersion
|
||||
|
||||
return fakeClient, nil
|
||||
}
|
||||
return kubeconfigutil.ClientSetFromFile(file)
|
||||
}
|
||||
|
@ -20,22 +20,18 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
|
||||
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
)
|
||||
|
||||
// ErrorSubcommandRequired is an error returned when a parent command cannot be executed.
|
||||
@ -115,18 +111,6 @@ func InteractivelyConfirmAction(action, question string, r io.Reader) error {
|
||||
return errors.New("won't proceed; the user didn't answer (Y|y) in order to continue")
|
||||
}
|
||||
|
||||
// GetClientSet gets a real or fake client depending on whether the user is dry-running or not
|
||||
func GetClientSet(file string, dryRun bool) (clientset.Interface, error) {
|
||||
if dryRun {
|
||||
dryRunGetter, err := apiclient.NewClientBackedDryRunGetterFromKubeconfig(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil
|
||||
}
|
||||
return kubeconfigutil.ClientSetFromFile(file)
|
||||
}
|
||||
|
||||
// ValueFromFlagsOrConfig checks if the "name" flag has been set. If yes, it returns the value of the flag, otherwise it returns the value from config.
|
||||
func ValueFromFlagsOrConfig(flagSet *pflag.FlagSet, name string, cfgValue interface{}, flagValue interface{}) interface{} {
|
||||
if flagSet.Changed(name) {
|
||||
|
@ -38,9 +38,9 @@ const (
|
||||
KubernetesDir = "/etc/kubernetes"
|
||||
// ManifestsSubDirName defines directory name to store manifests
|
||||
ManifestsSubDirName = "manifests"
|
||||
// TempDirForKubeadm defines temporary directory for kubeadm
|
||||
// TempDir defines temporary directory for kubeadm
|
||||
// should be joined with KubernetesDir.
|
||||
TempDirForKubeadm = "tmp"
|
||||
TempDir = "tmp"
|
||||
|
||||
// CertificateBackdate defines the offset applied to notBefore for CA certificates generated by kubeadm
|
||||
CertificateBackdate = time.Minute * 5
|
||||
@ -455,6 +455,13 @@ const (
|
||||
ServiceAccountKeyReadersGroupName string = "kubeadm-sa-key-readers"
|
||||
// UpgradeConfigurationKind is the string kind value for the UpgradeConfiguration struct
|
||||
UpgradeConfigurationKind = "UpgradeConfiguration"
|
||||
|
||||
// EnvVarInitDryRunDir has the environment variable for init dry run directory override.
|
||||
EnvVarInitDryRunDir = "KUBEADM_INIT_DRYRUN_DIR"
|
||||
// EnvVarJoinDryRunDir has the environment variable for join dry run directory override.
|
||||
EnvVarJoinDryRunDir = "KUBEADM_JOIN_DRYRUN_DIR"
|
||||
// EnvVarUpgradeDryRunDir has the environment variable for upgrade dry run directory override.
|
||||
EnvVarUpgradeDryRunDir = "KUBEADM_UPGRADE_DRYRUN_DIR"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -586,30 +593,52 @@ func GetKubeletKubeConfigPath() string {
|
||||
return filepath.Join(KubernetesDir, KubeletKubeConfigFileName)
|
||||
}
|
||||
|
||||
// CreateTempDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp (not using /tmp as that would potentially be dangerous)
|
||||
func CreateTempDirForKubeadm(kubernetesDir, dirName string) (string, error) {
|
||||
tempDir := filepath.Join(KubernetesDir, TempDirForKubeadm)
|
||||
if len(kubernetesDir) != 0 {
|
||||
tempDir = filepath.Join(kubernetesDir, TempDirForKubeadm)
|
||||
// GetDryRunDir creates a temporary directory under /etc/kubernetes/tmp.
|
||||
// If the environment variable with name stored in envVar is set, it is used instead.
|
||||
// msgFunc will be used to print a message to the user that they can use envVar for override.
|
||||
func GetDryRunDir(envVar, dirName string, msgFunc func(format string, args ...interface{})) (string, error) {
|
||||
envVarDir := os.Getenv(envVar)
|
||||
if len(envVarDir) > 0 {
|
||||
return envVarDir, nil
|
||||
}
|
||||
tempDir := filepath.Join(KubernetesDir, TempDir)
|
||||
generatedDir, err := createTmpDir(tempDir, dirName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// creates target folder if not already exists
|
||||
msgFunc("Using dry-run directory %s. To override it, set the environment variable %s",
|
||||
generatedDir, envVar)
|
||||
|
||||
return generatedDir, nil
|
||||
}
|
||||
|
||||
// CreateTempDir creates a temporary directory under /etc/kubernetes/tmp
|
||||
// or under the provided parent directory if it's set.
|
||||
func CreateTempDir(parent, dirName string) (string, error) {
|
||||
tempDir := filepath.Join(KubernetesDir, TempDir)
|
||||
if len(parent) > 0 {
|
||||
tempDir = filepath.Join(parent, TempDir)
|
||||
}
|
||||
return createTmpDir(tempDir, dirName)
|
||||
}
|
||||
|
||||
func createTmpDir(tempDir, dirName string) (string, error) {
|
||||
if err := os.MkdirAll(tempDir, 0700); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create directory %q", tempDir)
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tempDir, dirName)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "couldn't create a temporary directory")
|
||||
return "", errors.Wrapf(err, "could not create a temporary directory in %q", tempDir)
|
||||
}
|
||||
return tempDir, nil
|
||||
}
|
||||
|
||||
// CreateTimestampDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date
|
||||
func CreateTimestampDirForKubeadm(kubernetesDir, dirName string) (string, error) {
|
||||
tempDir := filepath.Join(KubernetesDir, TempDirForKubeadm)
|
||||
// CreateTimestampDir is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date
|
||||
func CreateTimestampDir(kubernetesDir, dirName string) (string, error) {
|
||||
tempDir := filepath.Join(KubernetesDir, TempDir)
|
||||
if len(kubernetesDir) != 0 {
|
||||
tempDir = filepath.Join(kubernetesDir, TempDirForKubeadm)
|
||||
tempDir = filepath.Join(kubernetesDir, TempDir)
|
||||
}
|
||||
|
||||
// creates target folder if not already exists
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
@ -37,10 +38,10 @@ const TokenUser = "tls-bootstrap-token-user"
|
||||
|
||||
// For returns a kubeconfig object that can be used for doing the TLS Bootstrap with the right credentials
|
||||
// Also, before returning anything, it makes sure it can trust the API Server
|
||||
func For(cfg *kubeadmapi.JoinConfiguration) (*clientcmdapi.Config, error) {
|
||||
func For(client clientset.Interface, cfg *kubeadmapi.JoinConfiguration) (*clientcmdapi.Config, error) {
|
||||
// TODO: Print summary info about the CA certificate, along with the checksum signature
|
||||
// we also need an ability for the user to configure the client to validate received CA cert against a checksum
|
||||
config, err := DiscoverValidatedKubeConfig(cfg)
|
||||
config, err := DiscoverValidatedKubeConfig(client, cfg)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't validate the identity of the API Server")
|
||||
}
|
||||
@ -71,7 +72,7 @@ func For(cfg *kubeadmapi.JoinConfiguration) (*clientcmdapi.Config, error) {
|
||||
}
|
||||
|
||||
// DiscoverValidatedKubeConfig returns a validated Config object that specifies where the cluster is and the CA cert to trust
|
||||
func DiscoverValidatedKubeConfig(cfg *kubeadmapi.JoinConfiguration) (*clientcmdapi.Config, error) {
|
||||
func DiscoverValidatedKubeConfig(dryRunClient clientset.Interface, cfg *kubeadmapi.JoinConfiguration) (*clientcmdapi.Config, error) {
|
||||
timeout := cfg.Timeouts.Discovery.Duration
|
||||
switch {
|
||||
case cfg.Discovery.File != nil:
|
||||
@ -81,7 +82,7 @@ func DiscoverValidatedKubeConfig(cfg *kubeadmapi.JoinConfiguration) (*clientcmda
|
||||
}
|
||||
return file.RetrieveValidatedConfigInfo(kubeConfigPath, timeout)
|
||||
case cfg.Discovery.BootstrapToken != nil:
|
||||
return token.RetrieveValidatedConfigInfo(&cfg.Discovery, timeout)
|
||||
return token.RetrieveValidatedConfigInfo(dryRunClient, &cfg.Discovery, timeout)
|
||||
default:
|
||||
return nil, errors.New("couldn't find a valid discovery configuration")
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
)
|
||||
@ -76,7 +77,8 @@ func TestFor(t *testing.T) {
|
||||
config.Timeouts = &kubeadm.Timeouts{
|
||||
Discovery: &metav1.Duration{Duration: 1 * time.Minute},
|
||||
}
|
||||
_, actual := For(&config)
|
||||
client := fakeclient.NewSimpleClientset()
|
||||
_, actual := For(client, &config)
|
||||
if (actual == nil) != rt.expect {
|
||||
t.Errorf(
|
||||
"failed For:\n\texpected: %t\n\t actual: %t",
|
||||
|
@ -49,23 +49,16 @@ const BootstrapUser = "token-bootstrap-client"
|
||||
// RetrieveValidatedConfigInfo connects to the API Server and tries to fetch the cluster-info ConfigMap
|
||||
// It then makes sure it can trust the API Server by looking at the JWS-signed tokens and (if CACertHashes is not empty)
|
||||
// validating the cluster CA against a set of pinned public keys
|
||||
func RetrieveValidatedConfigInfo(cfg *kubeadmapi.Discovery, timeout time.Duration) (*clientcmdapi.Config, error) {
|
||||
return retrieveValidatedConfigInfo(nil, cfg, constants.DiscoveryRetryInterval, timeout)
|
||||
func RetrieveValidatedConfigInfo(dryRunClient clientset.Interface, cfg *kubeadmapi.Discovery, timeout time.Duration) (*clientcmdapi.Config, error) {
|
||||
isDryRun := dryRunClient != nil
|
||||
isTesting := false
|
||||
return retrieveValidatedConfigInfo(dryRunClient, cfg, constants.DiscoveryRetryInterval, timeout, isDryRun, isTesting)
|
||||
}
|
||||
|
||||
// retrieveValidatedConfigInfo is a private implementation of RetrieveValidatedConfigInfo.
|
||||
// It accepts an optional clientset that can be used for testing purposes.
|
||||
func retrieveValidatedConfigInfo(client clientset.Interface, cfg *kubeadmapi.Discovery, interval, timeout time.Duration) (*clientcmdapi.Config, error) {
|
||||
token, err := bootstraptokenv1.NewBootstrapTokenString(cfg.BootstrapToken.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Load the CACertHashes into a pubkeypin.Set
|
||||
pubKeyPins := pubkeypin.NewSet()
|
||||
if err = pubKeyPins.Allow(cfg.BootstrapToken.CACertHashes...); err != nil {
|
||||
return nil, errors.Wrap(err, "invalid discovery token CA certificate hash")
|
||||
}
|
||||
func retrieveValidatedConfigInfo(client clientset.Interface, cfg *kubeadmapi.Discovery, interval, timeout time.Duration, isDryRun, isTesting bool) (*clientcmdapi.Config, error) {
|
||||
var err error
|
||||
|
||||
// Make sure the interval is not bigger than the duration
|
||||
if interval > timeout {
|
||||
@ -73,11 +66,28 @@ func retrieveValidatedConfigInfo(client clientset.Interface, cfg *kubeadmapi.Dis
|
||||
}
|
||||
|
||||
endpoint := cfg.BootstrapToken.APIServerEndpoint
|
||||
insecureBootstrapConfig := buildInsecureBootstrapKubeConfig(endpoint, kubeadmapiv1.DefaultClusterName)
|
||||
insecureBootstrapConfig := BuildInsecureBootstrapKubeConfig(endpoint)
|
||||
clusterName := insecureBootstrapConfig.Contexts[insecureBootstrapConfig.CurrentContext].Cluster
|
||||
|
||||
klog.V(1).Infof("[discovery] Created cluster-info discovery client, requesting info from %q", endpoint)
|
||||
insecureClusterInfo, err := getClusterInfo(client, insecureBootstrapConfig, token, interval, timeout)
|
||||
if !isDryRun && !isTesting {
|
||||
client, err = kubeconfigutil.ToClientSet(insecureBootstrapConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
insecureClusterInfo, err := getClusterInfo(client, cfg, interval, timeout, isDryRun)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Load the CACertHashes into a pubkeypin.Set
|
||||
pubKeyPins := pubkeypin.NewSet()
|
||||
if err := pubKeyPins.Allow(cfg.BootstrapToken.CACertHashes...); err != nil {
|
||||
return nil, errors.Wrap(err, "invalid discovery token CA certificate hash")
|
||||
}
|
||||
|
||||
token, err := bootstraptokenv1.NewBootstrapTokenString(cfg.BootstrapToken.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -115,7 +125,13 @@ func retrieveValidatedConfigInfo(client clientset.Interface, cfg *kubeadmapi.Dis
|
||||
secureBootstrapConfig := buildSecureBootstrapKubeConfig(endpoint, clusterCABytes, clusterName)
|
||||
|
||||
klog.V(1).Infof("[discovery] Requesting info from %q again to validate TLS against the pinned public key", endpoint)
|
||||
secureClusterInfo, err := getClusterInfo(client, secureBootstrapConfig, token, interval, timeout)
|
||||
if !isDryRun && !isTesting {
|
||||
client, err = kubeconfigutil.ToClientSet(secureBootstrapConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
secureClusterInfo, err := getClusterInfo(client, cfg, interval, timeout, isDryRun)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -136,11 +152,12 @@ func retrieveValidatedConfigInfo(client clientset.Interface, cfg *kubeadmapi.Dis
|
||||
return secureKubeconfig, nil
|
||||
}
|
||||
|
||||
// buildInsecureBootstrapKubeConfig makes a kubeconfig object that connects insecurely to the API Server for bootstrapping purposes
|
||||
func buildInsecureBootstrapKubeConfig(endpoint, clustername string) *clientcmdapi.Config {
|
||||
// BuildInsecureBootstrapKubeConfig makes a kubeconfig object that connects insecurely to the API Server for bootstrapping purposes
|
||||
func BuildInsecureBootstrapKubeConfig(endpoint string) *clientcmdapi.Config {
|
||||
controlPlaneEndpoint := fmt.Sprintf("https://%s", endpoint)
|
||||
bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clustername, BootstrapUser, []byte{})
|
||||
bootstrapConfig.Clusters[clustername].InsecureSkipTLSVerify = true
|
||||
clusterName := kubeadmapiv1.DefaultClusterName
|
||||
bootstrapConfig := kubeconfigutil.CreateBasic(controlPlaneEndpoint, clusterName, BootstrapUser, []byte{})
|
||||
bootstrapConfig.Clusters[clusterName].InsecureSkipTLSVerify = true
|
||||
return bootstrapConfig
|
||||
}
|
||||
|
||||
@ -192,30 +209,29 @@ func validateClusterCA(insecureConfig *clientcmdapi.Config, pubKeyPins *pubkeypi
|
||||
return clusterCABytes, nil
|
||||
}
|
||||
|
||||
// getClusterInfo creates a client from the given kubeconfig if the given client is nil,
|
||||
// and requests the cluster info ConfigMap using PollImmediate.
|
||||
// If a client is provided it will be used instead.
|
||||
func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config, token *bootstraptokenv1.BootstrapTokenString, interval, duration time.Duration) (*v1.ConfigMap, error) {
|
||||
var cm *v1.ConfigMap
|
||||
var err error
|
||||
// getClusterInfo requests the cluster-info ConfigMap with the provided client.
|
||||
func getClusterInfo(client clientset.Interface, cfg *kubeadmapi.Discovery, interval, duration time.Duration, dryRun bool) (*v1.ConfigMap, error) {
|
||||
var (
|
||||
cm *v1.ConfigMap
|
||||
err error
|
||||
lastError error
|
||||
)
|
||||
|
||||
// Create client from kubeconfig
|
||||
if client == nil {
|
||||
client, err = kubeconfigutil.ToClientSet(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(1).Infof("[discovery] Waiting for the cluster-info ConfigMap to receive a JWS signature"+
|
||||
"for token ID %q", token.ID)
|
||||
|
||||
var lastError error
|
||||
err = wait.PollUntilContextTimeout(context.Background(),
|
||||
interval, duration, true,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
token, err := bootstraptokenv1.NewBootstrapTokenString(cfg.BootstrapToken.Token)
|
||||
if err != nil {
|
||||
lastError = errors.Wrapf(err, "could not construct token string for token: %s",
|
||||
cfg.BootstrapToken.Token)
|
||||
return true, lastError
|
||||
}
|
||||
|
||||
klog.V(1).Infof("[discovery] Waiting for the cluster-info ConfigMap to receive a JWS signature"+
|
||||
"for token ID %q", token.ID)
|
||||
|
||||
cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).
|
||||
Get(ctx, bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
|
||||
Get(context.Background(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
lastError = errors.Wrapf(err, "failed to request the cluster-info ConfigMap")
|
||||
klog.V(1).Infof("[discovery] Retrying due to error: %v", lastError)
|
||||
@ -225,6 +241,12 @@ func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config,
|
||||
if _, ok := cm.Data[bootstrapapi.JWSSignatureKeyPrefix+token.ID]; !ok {
|
||||
lastError = errors.Errorf("could not find a JWS signature in the cluster-info ConfigMap"+
|
||||
" for token ID %q", token.ID)
|
||||
if dryRun {
|
||||
// Assume the user is dry-running with a token that will never appear in the cluster-info
|
||||
// ConfigMap. Use the default dry-run token and CA cert hash.
|
||||
mutateTokenDiscoveryForDryRun(cfg)
|
||||
return false, nil
|
||||
}
|
||||
klog.V(1).Infof("[discovery] Retrying due to error: %v", lastError)
|
||||
return false, nil
|
||||
}
|
||||
@ -236,3 +258,28 @@ func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config,
|
||||
|
||||
return cm, nil
|
||||
}
|
||||

// mutateTokenDiscoveryForDryRun mutates the JoinConfiguration.Discovery so that it includes a dry-run token
// CA cert hash and fake API server endpoint to comply with the fake "cluster-info" ConfigMap
// that this reactor returns. The information here should be in sync with what the GetClusterInfoReactor()
// dry-run reactor does.
func mutateTokenDiscoveryForDryRun(cfg *kubeadmapi.Discovery) {
const (
tokenID = "abcdef"
tokenSecret = "abcdef0123456789"
caHash = "sha256:3b793efefe27a19f93b0fbe6e637e9c41d0dde8a377d6ab1c0f656bf1136dd8a"
endpoint = "https://192.168.0.101:6443"
)

token := fmt.Sprintf("%s.%s", tokenID, tokenSecret)
klog.Warningf("[dryrun] Mutating the JoinConfiguration.Discovery.BootstrapToken to satisfy "+
"the dry-run without a real cluster-info ConfigMap:\n"+
" Token: %s\n CACertHash: %s\n APIServerEndpoint: %s\n",
token, caHash, endpoint)
if cfg.BootstrapToken == nil {
cfg.BootstrapToken = &kubeadmapi.BootstrapTokenDiscovery{}
}
cfg.BootstrapToken.Token = token
cfg.BootstrapToken.CACertHashes = append(cfg.BootstrapToken.CACertHashes, caHash)
cfg.BootstrapToken.APIServerEndpoint = endpoint
}
@ -263,13 +263,13 @@ users: null
|
||||
}
|
||||
|
||||
// Retrieve validated configuration
|
||||
kubeconfig, err = retrieveValidatedConfigInfo(client, test.cfg, interval, timeout)
|
||||
kubeconfig, err = retrieveValidatedConfigInfo(client, test.cfg, interval, timeout, false, true)
|
||||
if (err != nil) != test.expectedError {
|
||||
t.Errorf("expected error %v, got %v, error: %v", test.expectedError, err != nil, err)
|
||||
}
|
||||
|
||||
// Return if an error is expected
|
||||
if test.expectedError {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -204,8 +205,14 @@ func createKubeProxyConfigMap(cfg *kubeadmapi.ClusterConfiguration, localEndpoin
|
||||
if err != nil {
|
||||
return []byte(""), errors.Wrap(err, "error when marshaling")
|
||||
}
|
||||
|
||||
// Indent the proxy CM bytes with 4 spaces to comply with the location in the template.
|
||||
var prefixBytes bytes.Buffer
|
||||
apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ")
|
||||
scanner := bufio.NewScanner(bytes.NewReader(proxyBytes))
|
||||
for scanner.Scan() {
|
||||
fmt.Fprintf(&prefixBytes, " %s\n", scanner.Text())
|
||||
}
|
||||
|
||||
configMapBytes, err := kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
|
||||
struct {
|
||||
ControlPlaneEndpoint string
|
||||
|
@ -41,6 +41,9 @@ import (
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/output"
|
||||
)
|
||||
|
||||
// createJobHealthCheckPrefix is name prefix for the Job health check.
|
||||
const createJobHealthCheckPrefix = "upgrade-health-check"
|
||||
|
||||
// healthCheck is a helper struct for easily performing healthchecks against the cluster and printing the output
|
||||
type healthCheck struct {
|
||||
name string
|
||||
@ -94,7 +97,6 @@ func CheckClusterHealth(client clientset.Interface, cfg *kubeadmapi.ClusterConfi
|
||||
// createJob is a check that verifies that a Job can be created in the cluster
|
||||
func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) error {
|
||||
const (
|
||||
prefix = "upgrade-health-check"
|
||||
fieldSelector = "spec.unschedulable=false"
|
||||
ns = metav1.NamespaceSystem
|
||||
timeout = 15 * time.Second
|
||||
@ -107,13 +109,6 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
|
||||
listOptions = metav1.ListOptions{Limit: 1, FieldSelector: fieldSelector}
|
||||
)
|
||||
|
||||
// If client.Discovery().RESTClient() is nil, the fake client is used.
|
||||
// Return early because the kubeadm dryrun dynamic client only handles the core/v1 GroupVersion.
|
||||
if client.Discovery().RESTClient() == nil {
|
||||
fmt.Printf("[upgrade/health] Would create the Job with the prefix %q in namespace %q and wait until it completes\n", prefix, ns)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if there is at least one Node where a Job's Pod can schedule. If not, skip this preflight check.
|
||||
err = wait.PollUntilContextTimeout(ctx, time.Second*1, timeout, true, func(_ context.Context) (bool, error) {
|
||||
nodes, err = client.CoreV1().Nodes().List(context.Background(), listOptions)
|
||||
@ -136,11 +131,15 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
|
||||
// Adding a margin of error to the polling timeout.
|
||||
timeoutWithMargin := timeout.Seconds() + timeoutMargin.Seconds()
|
||||
|
||||
// Do not use ObjectMeta.GenerateName to avoid the problem where the dry-run client for upgrade cannot obtain
|
||||
// a Name for this Job during the GET call right after the CREATE call.
|
||||
jobName := fmt.Sprintf("%s-%d", createJobHealthCheckPrefix, time.Now().UTC().UnixMilli())
|
||||
|
||||
// Prepare Job
|
||||
job := &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: prefix + "-",
|
||||
Namespace: ns,
|
||||
Name: jobName,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: batchv1.JobSpec{
|
||||
BackoffLimit: ptr.To[int32](0),
|
||||
@ -162,7 +161,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: prefix,
|
||||
Name: createJobHealthCheckPrefix,
|
||||
Image: images.GetPauseImage(cfg),
|
||||
Args: []string{"-v"},
|
||||
},
|
||||
@ -173,21 +172,18 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
|
||||
}
|
||||
|
||||
// Create the Job, but retry if it fails
|
||||
klog.V(2).Infof("Creating a Job with the prefix %q in the namespace %q", prefix, ns)
|
||||
var jobName string
|
||||
klog.V(2).Infof("Creating a Job %q in the namespace %q", jobName, ns)
|
||||
err = wait.PollUntilContextTimeout(ctx, time.Second*1, timeout, true, func(_ context.Context) (bool, error) {
|
||||
createdJob, err := client.BatchV1().Jobs(ns).Create(context.Background(), job, metav1.CreateOptions{})
|
||||
_, err := client.BatchV1().Jobs(ns).Create(context.Background(), job, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Could not create a Job with the prefix %q in the namespace %q, retrying: %v", prefix, ns, err)
|
||||
klog.V(2).Infof("Could not create Job %q in the namespace %q, retrying: %v", jobName, ns, err)
|
||||
lastError = err
|
||||
return false, nil
|
||||
}
|
||||
|
||||
jobName = createdJob.Name
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(lastError, "could not create a Job with the prefix %q in the namespace %q", prefix, ns)
|
||||
return errors.Wrapf(lastError, "could not create Job %q in the namespace %q", jobName, ns)
|
||||
}
|
||||
|
||||
// Wait for the Job to complete
|
||||
|
@ -104,10 +104,13 @@ func WriteKubeletConfigFiles(cfg *kubeadmapi.InitConfiguration, patchesDir strin
|
||||
}
|
||||
|
||||
// Create a copy of the kubelet config file in the /etc/kubernetes/tmp/ folder.
|
||||
backupDir, err := kubeadmconstants.CreateTempDirForKubeadm(kubeadmconstants.KubernetesDir, "kubeadm-kubelet-config")
|
||||
backupDir, err := kubeadmconstants.CreateTempDir("", "kubeadm-kubelet-config")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
klog.Warningf("Using temporary directory %s for kubelet config. To override it set the environment variable %s",
|
||||
backupDir, kubeadmconstants.EnvVarUpgradeDryRunDir)
|
||||
|
||||
src := filepath.Join(kubeletDir, kubeadmconstants.KubeletConfigurationFileName)
|
||||
dest := filepath.Join(backupDir, kubeadmconstants.KubeletConfigurationFileName)
|
||||
|
||||
@ -139,7 +142,7 @@ func WriteKubeletConfigFiles(cfg *kubeadmapi.InitConfiguration, patchesDir strin
|
||||
// GetKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not.
|
||||
func GetKubeletDir(dryRun bool) (string, error) {
|
||||
if dryRun {
|
||||
return kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun")
|
||||
return kubeadmconstants.CreateTempDir("", "kubeadm-upgrade-dryrun")
|
||||
}
|
||||
return kubeadmconstants.KubeletRunDirectory, nil
|
||||
}
|
||||
|
@ -99,16 +99,15 @@ func NewKubeStaticPodPathManager(kubernetesDir, patchesDir, tempDir, backupDir,
|
||||
|
||||
// NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it
|
||||
func NewKubeStaticPodPathManagerUsingTempDirs(kubernetesDir, patchesDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) {
|
||||
|
||||
upgradedManifestsDir, err := constants.CreateTempDirForKubeadm(kubernetesDir, "kubeadm-upgraded-manifests")
|
||||
upgradedManifestsDir, err := constants.CreateTempDir(kubernetesDir, "kubeadm-upgraded-manifests")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backupManifestsDir, err := constants.CreateTimestampDirForKubeadm(kubernetesDir, "kubeadm-backup-manifests")
|
||||
backupManifestsDir, err := constants.CreateTimestampDir(kubernetesDir, "kubeadm-backup-manifests")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backupEtcdDir, err := constants.CreateTimestampDirForKubeadm(kubernetesDir, "kubeadm-backup-etcd")
|
||||
backupEtcdDir, err := constants.CreateTimestampDir(kubernetesDir, "kubeadm-backup-etcd")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -650,11 +649,11 @@ func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter
|
||||
|
||||
// DryRunStaticPodUpgrade fakes an upgrade of the control plane
|
||||
func DryRunStaticPodUpgrade(patchesDir string, internalcfg *kubeadmapi.InitConfiguration) error {
|
||||
|
||||
dryRunManifestDir, err := constants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun")
|
||||
dryRunManifestDir, err := constants.GetDryRunDir(constants.EnvVarUpgradeDryRunDir, "kubeadm-upgrade-dryrun", klog.Warningf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer os.RemoveAll(dryRunManifestDir)
|
||||
if err := controlplane.CreateInitStaticPodManifestFiles(dryRunManifestDir, patchesDir, internalcfg, true /* isDryRun */); err != nil {
|
||||
return err
|
||||
|
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// ClientBackedDryRunGetter implements the DryRunGetter interface for use in NewDryRunClient() and proxies all GET and LIST requests to the backing API server reachable via rest.Config
|
||||
type ClientBackedDryRunGetter struct {
|
||||
client clientset.Interface
|
||||
dynamicClient dynamic.Interface
|
||||
}
|
||||
|
||||
// InitDryRunGetter should implement the DryRunGetter interface
|
||||
var _ DryRunGetter = &ClientBackedDryRunGetter{}
|
||||
|
||||
// NewClientBackedDryRunGetter creates a new ClientBackedDryRunGetter instance based on the rest.Config object
|
||||
func NewClientBackedDryRunGetter(config *rest.Config) (*ClientBackedDryRunGetter, error) {
|
||||
client, err := clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dynamicClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ClientBackedDryRunGetter{
|
||||
client: client,
|
||||
dynamicClient: dynamicClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewClientBackedDryRunGetterFromKubeconfig creates a new ClientBackedDryRunGetter instance from the given KubeConfig file
|
||||
func NewClientBackedDryRunGetterFromKubeconfig(file string) (*ClientBackedDryRunGetter, error) {
|
||||
config, err := clientcmd.LoadFromFile(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load kubeconfig")
|
||||
}
|
||||
clientConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig")
|
||||
}
|
||||
return NewClientBackedDryRunGetter(clientConfig)
|
||||
}
|
||||
|
||||
// HandleGetAction handles GET actions to the dryrun clientset this interface supports
|
||||
func (clg *ClientBackedDryRunGetter) HandleGetAction(action core.GetAction) (bool, runtime.Object, error) {
|
||||
unstructuredObj, err := clg.dynamicClient.Resource(action.GetResource()).Namespace(action.GetNamespace()).Get(context.TODO(), action.GetName(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
// Inform the user that the requested object wasn't found.
|
||||
printIfNotExists(err)
|
||||
return true, nil, err
|
||||
}
|
||||
newObj, err := decodeUnstructuredIntoAPIObject(action, unstructuredObj)
|
||||
if err != nil {
|
||||
fmt.Printf("error after decode: %v %v\n", unstructuredObj, err)
|
||||
return true, nil, err
|
||||
}
|
||||
return true, newObj, err
|
||||
}
|
||||
|
||||
// HandleListAction handles LIST actions to the dryrun clientset this interface supports
|
||||
func (clg *ClientBackedDryRunGetter) HandleListAction(action core.ListAction) (bool, runtime.Object, error) {
|
||||
listOpts := metav1.ListOptions{
|
||||
LabelSelector: action.GetListRestrictions().Labels.String(),
|
||||
FieldSelector: action.GetListRestrictions().Fields.String(),
|
||||
}
|
||||
|
||||
unstructuredList, err := clg.dynamicClient.Resource(action.GetResource()).Namespace(action.GetNamespace()).List(context.TODO(), listOpts)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
newObj, err := decodeUnstructuredIntoAPIObject(action, unstructuredList)
|
||||
if err != nil {
|
||||
fmt.Printf("error after decode: %v %v\n", unstructuredList, err)
|
||||
return true, nil, err
|
||||
}
|
||||
return true, newObj, err
|
||||
}
|
||||
|
||||
// Client gets the backing clientset.Interface
|
||||
func (clg *ClientBackedDryRunGetter) Client() clientset.Interface {
|
||||
return clg.client
|
||||
}
|
||||
|
||||
// decodeUnstructuredIntoAPIObject converts the *unversioned.Unversioned object returned from the dynamic client
|
||||
// to bytes; and then decodes it back _to an external api version (k8s.io/api)_ using the normal API machinery
|
||||
func decodeUnstructuredIntoAPIObject(action core.Action, unstructuredObj runtime.Unstructured) (runtime.Object, error) {
|
||||
objBytes, err := json.Marshal(unstructuredObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newObj, err := runtime.Decode(clientsetscheme.Codecs.UniversalDecoder(action.GetResource().GroupVersion()), objBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObj, nil
|
||||
}
|
||||
|
||||
func printIfNotExists(err error) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
fmt.Println("[dryrun] The GET request didn't yield any result, the API Server returned a NotFound error.")
|
||||
}
|
||||
}
|
635 cmd/kubeadm/app/util/apiclient/dryrun.go Normal file
@@ -0,0 +1,635 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package apiclient contains wrapping logic for Kubernetes API clients.
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/lithammer/dedent"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
|
||||
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
|
||||
)
|
||||
|
||||
// DryRun is responsible for performing verbose dry-run operations with a set of different
|
||||
// API clients. Any REST action that reaches the FakeClient() of a DryRun will be processed
|
||||
// as follows by the fake client reactor chain:
|
||||
// - Log the action.
|
||||
// - If the action is not GET or LIST just use the fake client store to write it, unless
|
||||
// a user reactor was added with PrependReactor() or AppendReactor().
|
||||
// - Attempt to GET or LIST using the real dynamic client.
|
||||
// - If the above fails try to GET or LIST the object from the fake client store, unless
|
||||
// a user reactor was added with PrependReactor() or AppendReactor().
|
||||
type DryRun struct {
|
||||
fakeClient *fake.Clientset
|
||||
client clientset.Interface
|
||||
dynamicClient dynamic.Interface
|
||||
|
||||
writer io.Writer
|
||||
marshalFunc func(runtime.Object, schema.GroupVersion) ([]byte, error)
|
||||
}
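To make the intended call pattern concrete, here is a minimal, hypothetical sketch of how a command workflow could wire a DryRun; the helper name, the io.Writer choice, and the "opt into real clients only when a kubeconfig exists" rule are assumptions for illustration, not part of this diff:

package example

import (
	"io"

	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)

// buildDryRunClient always returns the fake client; real clientset and dynamic
// clients are attached only when the caller can point at an existing cluster.
func buildDryRunClient(kubeconfigPath string, out io.Writer) (clientset.Interface, error) {
	d := apiclient.NewDryRun().WithDefaultMarshalFunction().WithWriter(out)
	if kubeconfigPath != "" {
		if err := d.WithKubeConfigFile(kubeconfigPath); err != nil {
			return nil, err
		}
	}
	return d.FakeClient(), nil
}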
|
||||
|
||||
// NewDryRun creates a new DryRun object that only has a fake client.
|
||||
func NewDryRun() *DryRun {
|
||||
d := &DryRun{}
|
||||
d.fakeClient = fake.NewSimpleClientset()
|
||||
d.addReactors()
|
||||
return d
|
||||
}
|
||||
|
||||
// WithKubeConfigFile takes a file path and creates real clientset and dynamic clients.
|
||||
func (d *DryRun) WithKubeConfigFile(file string) error {
|
||||
config, err := clientcmd.LoadFromFile(file)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to load kubeconfig")
|
||||
}
|
||||
return d.WithKubeConfig(config)
|
||||
}
|
||||
|
||||
// WithKubeConfig takes a Config (kubeconfig) and creates real clientset and dynamic client.
|
||||
func (d *DryRun) WithKubeConfig(config *clientcmdapi.Config) error {
|
||||
restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create API client configuration from kubeconfig")
|
||||
}
|
||||
return d.WithRestConfig(restConfig)
|
||||
}
|
||||
|
||||
// WithRestConfig takes a rest Config and creates real clientset and dynamic clients.
|
||||
func (d *DryRun) WithRestConfig(config *rest.Config) error {
|
||||
var err error
|
||||
|
||||
d.client, err = clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.dynamicClient, err = dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithWriter sets the io.Writer used for printing by the DryRun.
|
||||
func (d *DryRun) WithWriter(w io.Writer) *DryRun {
|
||||
d.writer = w
|
||||
return d
|
||||
}
|
||||
|
||||
// WithMarshalFunction sets the DryRun marshal function.
|
||||
func (d *DryRun) WithMarshalFunction(f func(runtime.Object, schema.GroupVersion) ([]byte, error)) *DryRun {
|
||||
d.marshalFunc = f
|
||||
return d
|
||||
}
|
||||
|
||||
// WithDefaultMarshalFunction sets the DryRun marshal function to the default one.
|
||||
func (d *DryRun) WithDefaultMarshalFunction() *DryRun {
|
||||
d.WithMarshalFunction(kubeadmutil.MarshalToYaml)
|
||||
return d
|
||||
}
|
||||
|
||||
// PrependReactor prepends a new reactor in the fake client ReactorChain at position 1.
|
||||
// Keeps position 0 for the log reactor:
|
||||
// [ log, r, ... rest of the chain, default fake client reactor ]
|
||||
func (d *DryRun) PrependReactor(r *testing.SimpleReactor) *DryRun {
|
||||
log := d.fakeClient.Fake.ReactionChain[0]
|
||||
chain := make([]testing.Reactor, len(d.fakeClient.Fake.ReactionChain)+1)
|
||||
chain[0] = log
|
||||
chain[1] = r
|
||||
copy(chain[2:], d.fakeClient.Fake.ReactionChain[1:])
|
||||
d.fakeClient.Fake.ReactionChain = chain
|
||||
return d
|
||||
}
|
||||
|
||||
// AppendReactor appends a new reactor in the fake client ReactorChain at position len-2.
|
||||
// Keeps position len-1 for the default fake client reactor.
|
||||
// [ log, rest of the chain... , r, default fake client reactor ]
|
||||
func (d *DryRun) AppendReactor(r *testing.SimpleReactor) *DryRun {
|
||||
sz := len(d.fakeClient.Fake.ReactionChain)
|
||||
def := d.fakeClient.Fake.ReactionChain[sz-1]
|
||||
d.fakeClient.Fake.ReactionChain[sz-1] = r
|
||||
d.fakeClient.Fake.ReactionChain = append(d.fakeClient.Fake.ReactionChain, def)
|
||||
return d
|
||||
}
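As a hypothetical illustration of the resulting ordering (written as if inside this package, using its imports; the no-op reaction and the verb/resource pairs are made up), chaining one prepend and one append on a fresh DryRun gives:

noop := func(action testing.Action) (bool, runtime.Object, error) { return false, nil, nil }
d := NewDryRun()
d.PrependReactor(&testing.SimpleReactor{Verb: "get", Resource: "secrets", Reaction: noop}).
	AppendReactor(&testing.SimpleReactor{Verb: "list", Resource: "pods", Reaction: noop})
// Chain is now: [ log, get/secrets, get, list, list/pods, default ]
// i.e. the prepended reactor runs right after the logging reactor, and the
// appended one runs just before the fake clientset's default object tracker.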
|
||||
|
||||
// Client returns the clientset for this DryRun.
|
||||
func (d *DryRun) Client() clientset.Interface {
|
||||
return d.client
|
||||
}
|
||||
|
||||
// DynamicClient returns the dynamic client for this DryRun.
|
||||
func (d *DryRun) DynamicClient() dynamic.Interface {
|
||||
return d.dynamicClient
|
||||
}
|
||||
|
||||
// FakeClient returns the fake client for this DryRun.
|
||||
func (d *DryRun) FakeClient() clientset.Interface {
|
||||
return d.fakeClient
|
||||
}
|
||||
|
||||
// addReactors is by default called by NewDryRun after creating the fake client.
|
||||
// It prepends a set of reactors before the default fake client reactor.
|
||||
func (d *DryRun) addReactors() {
|
||||
reactors := []testing.Reactor{
|
||||
// Add a reactor for logging all requests that reach the fake client.
|
||||
&testing.SimpleReactor{
|
||||
Verb: "*",
|
||||
Resource: "*",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
d.LogAction(action)
|
||||
return false, nil, nil
|
||||
},
|
||||
},
|
||||
// Add a reactor for all GET requests that reach the fake client.
|
||||
// This reactor calls the real dynamic client, but if it cannot process the object
|
||||
// the reactor chain is continued.
|
||||
&testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "*",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
getAction, ok := action.(testing.GetAction)
|
||||
if !ok {
|
||||
return true, nil, errors.New("cannot cast reactor action to GetAction")
|
||||
}
|
||||
|
||||
handled, obj, err := d.handleGetAction(getAction)
|
||||
if err != nil {
|
||||
fmt.Fprintln(d.writer, "[dryrun] Real object does not exist. "+
|
||||
"Attempting to GET from followup reactors or from the fake client tracker")
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return handled, obj, err
|
||||
},
|
||||
},
|
||||
// Add a reactor for all LIST requests that reach the fake client.
|
||||
// This reactor calls the real dynamic client, but if it cannot process the object
|
||||
// the reactor chain is continued.
|
||||
&testing.SimpleReactor{
|
||||
Verb: "list",
|
||||
Resource: "*",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
listAction, ok := action.(testing.ListAction)
|
||||
if !ok {
|
||||
return true, nil, errors.New("cannot cast reactor action to ListAction")
|
||||
}
|
||||
|
||||
handled, obj, err := d.handleListAction(listAction)
|
||||
if err != nil {
|
||||
fmt.Fprintln(d.writer, "[dryrun] Real object does not exist. "+
|
||||
"Attempting to LIST from followup reactors or from the fake client tracker")
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return handled, obj, err
|
||||
},
|
||||
},
|
||||
}
|
||||
d.fakeClient.Fake.ReactionChain = append(reactors, d.fakeClient.Fake.ReactionChain...)
|
||||
}
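A small, hypothetical sketch of the fallback path when no real clients are configured (object name made up, written as if inside this package): a CREATE is stored by the fake tracker, the GET reactor fails with "dynamicClient is nil" and lets the chain continue, and the default reactor then serves the object from the tracker.

d := NewDryRun().WithDefaultMarshalFunction().WithWriter(io.Discard)
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "example"}}
_, _ = d.FakeClient().CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.Background(), cm, metav1.CreateOptions{})
got, err := d.FakeClient().CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), "example", metav1.GetOptions{})
// err is nil and got is the ConfigMap that the fake tracker stored above.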
|
||||
|
||||
// handleGetAction tries to handle all GET actions with the dynamic client.
|
||||
func (d *DryRun) handleGetAction(action testing.GetAction) (bool, runtime.Object, error) {
|
||||
if d.dynamicClient == nil {
|
||||
return false, nil, errors.New("dynamicClient is nil")
|
||||
}
|
||||
|
||||
unstructuredObj, err := d.dynamicClient.
|
||||
Resource(action.GetResource()).
|
||||
Namespace(action.GetNamespace()).
|
||||
Get(context.Background(), action.GetName(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
newObj, err := d.decodeUnstructuredIntoAPIObject(action, unstructuredObj)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
return true, newObj, err
|
||||
}
|
||||
|
||||
// handleListAction tries to handle all LIST actions with the dynamic client.
|
||||
func (d *DryRun) handleListAction(action testing.ListAction) (bool, runtime.Object, error) {
|
||||
if d.dynamicClient == nil {
|
||||
return false, nil, errors.New("dynamicClient is nil")
|
||||
}
|
||||
|
||||
listOpts := metav1.ListOptions{
|
||||
LabelSelector: action.GetListRestrictions().Labels.String(),
|
||||
FieldSelector: action.GetListRestrictions().Fields.String(),
|
||||
}
|
||||
|
||||
unstructuredObj, err := d.dynamicClient.
|
||||
Resource(action.GetResource()).
|
||||
Namespace(action.GetNamespace()).
|
||||
List(context.Background(), listOpts)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
newObj, err := d.decodeUnstructuredIntoAPIObject(action, unstructuredObj)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
return true, newObj, err
|
||||
}
|
||||
|
||||
// decodeUnstructuredIntoAPIObject decodes an unstructured object into an API object.
|
||||
func (d *DryRun) decodeUnstructuredIntoAPIObject(action testing.Action, obj runtime.Unstructured) (runtime.Object, error) {
|
||||
objBytes, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newObj, err := runtime.Decode(clientsetscheme.Codecs.UniversalDecoder(action.GetResource().GroupVersion()), objBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObj, nil
|
||||
}
|
||||
|
||||
// LogAction logs details about an action, such as name, object and resource.
|
||||
func (d *DryRun) LogAction(action testing.Action) {
|
||||
// actionWithNameAndNamespace is the generic interface for an action that has a name and a namespace associated with it.
|
||||
type actionWithNameAndNamespace interface {
|
||||
testing.Action
|
||||
GetName() string
|
||||
GetNamespace() string
|
||||
}
|
||||
|
||||
// actionWithObject is the generic interface for an action that has an object associated with it.
|
||||
type actionWithObject interface {
|
||||
testing.Action
|
||||
GetObject() runtime.Object
|
||||
}
|
||||
|
||||
group := action.GetResource().Group
|
||||
if len(group) == 0 {
|
||||
group = "core"
|
||||
}
|
||||
fmt.Fprintf(d.writer, "[dryrun] Would perform action %s on resource %q in API group \"%s/%s\"\n",
|
||||
strings.ToUpper(action.GetVerb()), action.GetResource().Resource, group, action.GetResource().Version)
|
||||
|
||||
namedAction, ok := action.(actionWithNameAndNamespace)
|
||||
if ok {
|
||||
fmt.Fprintf(d.writer, "[dryrun] Resource name %q, namespace %q\n",
|
||||
namedAction.GetName(), namedAction.GetNamespace())
|
||||
}
|
||||
|
||||
objAction, ok := action.(actionWithObject)
|
||||
if ok && objAction.GetObject() != nil {
|
||||
d.LogObject(objAction.GetObject(), objAction.GetResource().GroupVersion())
|
||||
}
|
||||
}
|
||||
|
||||
// LogObject marshals the object and then prints it to the io.Writer of this DryRun.
|
||||
func (d *DryRun) LogObject(obj runtime.Object, gv schema.GroupVersion) {
|
||||
objBytes, err := d.marshalFunc(obj, gv)
|
||||
if err == nil {
|
||||
fmt.Fprintln(d.writer, "[dryrun] Attached object:")
|
||||
fmt.Fprintln(d.writer, string(objBytes))
|
||||
}
|
||||
}
|
||||
|
||||
// HealthCheckJobReactor returns a reactor that handles the GET action for the Job
|
||||
// object used for the "CreateJob" upgrade preflight check.
|
||||
func (d *DryRun) HealthCheckJobReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "jobs",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
if !strings.HasPrefix(a.GetName(), "upgrade-health-check") || a.GetNamespace() != metav1.NamespaceSystem {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := getJob(a.GetName(), a.GetNamespace())
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// PatchNodeReactor returns a reactor that handles the generic PATCH action on Node objects.
|
||||
func (d *DryRun) PatchNodeReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "patch",
|
||||
Resource: "nodes",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.PatchAction)
|
||||
obj := getNode(a.GetName())
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetNodeReactor returns a reactor that handles the generic GET action of Node objects.
|
||||
func (d *DryRun) GetNodeReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "nodes",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
obj := getNode(a.GetName())
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetClusterInfoReactor returns a reactor that handles the GET action of the "cluster-info"
|
||||
// ConfigMap used during node bootstrap.
|
||||
func (d *DryRun) GetClusterInfoReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "configmaps",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
if a.GetName() != bootstrapapi.ConfigMapClusterInfo || a.GetNamespace() != metav1.NamespacePublic {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := getClusterInfoConfigMap()
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetKubeadmConfigReactor returns a reactor that handles the GET action of the "kubeadm-config"
|
||||
// ConfigMap.
|
||||
func (d *DryRun) GetKubeadmConfigReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "configmaps",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
if a.GetName() != constants.KubeadmConfigConfigMap || a.GetNamespace() != metav1.NamespaceSystem {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
obj := getKubeadmConfigMap()
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetKubeletConfigReactor returns a reactor that handles the GET action of the "kubelet-config"
|
||||
// ConfigMap.
|
||||
func (d *DryRun) GetKubeletConfigReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "configmaps",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
if a.GetName() != constants.KubeletBaseConfigurationConfigMap || a.GetNamespace() != metav1.NamespaceSystem {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := getKubeletConfigMap()
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetKubeProxyConfigReactor returns a reactor that handles the GET action of the "kube-proxy"
|
||||
// ConfigMap.
|
||||
func (d *DryRun) GetKubeProxyConfigReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "configmaps",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.GetAction)
|
||||
if a.GetName() != constants.KubeProxyConfigMap || a.GetNamespace() != metav1.NamespaceSystem {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := getKubeProxyConfigMap()
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteBootstrapTokenReactor returns a reactor that handles the DELETE action
|
||||
// of bootstrap token Secret.
|
||||
func (d *DryRun) DeleteBootstrapTokenReactor() *testing.SimpleReactor {
|
||||
return &testing.SimpleReactor{
|
||||
Verb: "delete",
|
||||
Resource: "secrets",
|
||||
Reaction: func(action testing.Action) (bool, runtime.Object, error) {
|
||||
a := action.(testing.DeleteAction)
|
||||
if !strings.HasPrefix(a.GetName(), bootstrapapi.BootstrapTokenSecretPrefix) || a.GetNamespace() != metav1.NamespaceSystem {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.GetName(),
|
||||
Namespace: a.GetNamespace(),
|
||||
},
|
||||
}
|
||||
d.LogObject(obj, action.GetResource().GroupVersion())
|
||||
return true, obj, nil
|
||||
},
|
||||
}
|
||||
}
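Taken together, a join-style dry-run could (hypothetically) install the faked responses like this; which reactors a particular kubeadm command actually installs is decided by the calling workflow, not by this file:

d := NewDryRun().WithDefaultMarshalFunction().WithWriter(io.Discard) // a real workflow would pass its output writer
d.PrependReactor(d.GetClusterInfoReactor()).
	PrependReactor(d.GetKubeadmConfigReactor()).
	PrependReactor(d.GetKubeletConfigReactor()).
	PrependReactor(d.GetKubeProxyConfigReactor()).
	PrependReactor(d.GetNodeReactor())
// d.FakeClient() can now answer the GETs that "kubeadm join --dry-run" issues
// even when no real cluster exists.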
|
||||
|
||||
// getJob returns a fake Job object.
|
||||
func getJob(name, namespace string) *batchv1.Job {
|
||||
return &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Status: batchv1.JobStatus{
|
||||
Conditions: []batchv1.JobCondition{
|
||||
{Type: batchv1.JobComplete},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getNode returns a fake Node object.
|
||||
func getNode(name string) *corev1.Node {
|
||||
return &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"kubernetes.io/hostname": name,
|
||||
},
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getConfigMap returns a fake ConfigMap object.
|
||||
func getConfigMap(namespace, name string, data map[string]string) *corev1.ConfigMap {
|
||||
return &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
// getClusterInfoConfigMap returns a fake "cluster-info" ConfigMap.
|
||||
func getClusterInfoConfigMap() *corev1.ConfigMap {
|
||||
kubeconfig := dedent.Dedent(`apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJTkpmdGFCK09Xd0F3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBM01EZ3hNalF3TURSYUZ3MHpOREEzTURZeE1qUTFNRFJhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURhUURkaWdPeVdpbTZXLzQ4bjNQSG9WZVZCU2lkNldjbmRFV3VVcTVnQldYZGx0OTk2aCtWbkl0bHQKOHpDaGwvb1I1V2ZSYVJDODA1WitvTW4vWThJR1ZRM3QxaG55SW1ZbjR3M3Z6UlhvdUdlQmVpdTJTU1ZqZ0J3agpYanliYk1DbXJBZEljYkllWm1INjZldjV6KzVZS21aUlVZYzNoRGFIcFhkMEVFblp5SlY1d2FaczBYTVFVSE03CmVxT1pBWko5L21PM05VQnBsdnJQbnBPTUs3a1NFUFBnNzVjVTdXSG9KSEZrZVlXNTkzZ3NnQ3MyQnRVdTY0Y3EKYlZYOWJpZ3JZZGRwWmtvRUtLeFU4SEl3SHNJNVY4Um9uM21LRkdsckUxN2IybC84Q3FtQXVPdnl6TEllaVFHWAplZ0lhUi9uUkhISUQ5QVRpNnRYOEVhRERwZXYvQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRWWFDRGU2eXVWcVNhV1Y3M3pMaldtR2hpYVR6QVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2NlTW5uaVBhcQpHUkVqR3A2WFJTN1FFWW1RcmJFZCtUVjVKUTE4byttVzZvR3BaOENBM0pBSFFETk5QRW1QYzB5dVhEeE85QkZYCmJnMlhSSCtLTkozVzJKZlExVEgzVFhKRWF4Zkh1WDRtQjU5UkNsQzExNGtsV2RIeDFqN0FtRWt1eTZ0ZGpuYWkKZmh0U0dueEEwM2JwN2I4Z28zSWpXNE5wV1JOMVdHNTl2YTBKOEJIRmg3Q0RpZUxuK0RNdUk2M0Jna1kveTJzMApML2RtOVBmcWdVSzFBMy8wZGhDVjZiRUNqekEzSkJld21kSC8rVUJPeVkybUMwNVlQMzNkMHA5eXlrYmtkWE5xCkRONXlBc3ZNUC9PV0NuQjFlQlFUb2pNODJMU3F3dHZtbU1SNHRXYXVoOXVkVktHY2s0eEJaV3Vkcm5LRFVVWEkKUURNUFJnSkMvTng0Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
server: https://192.168.0.101:6443
|
||||
name: ""
|
||||
contexts: null
|
||||
current-context: ""
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users: null
|
||||
`)
|
||||
data := map[string]string{
|
||||
bootstrapapi.JWSSignatureKeyPrefix + "abcdef": "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiY2RlZiJ9..wUZ0q9o0VK1RWFptmSBOEem2bXHWrHyxrposHg0mb1w",
|
||||
bootstrapapi.KubeConfigKey: kubeconfig,
|
||||
}
|
||||
return getConfigMap(metav1.NamespacePublic, bootstrapapi.ConfigMapClusterInfo, data)
|
||||
}
|
||||
|
||||
// getKubeadmConfigMap returns a fake "kubeadm-config" ConfigMap.
|
||||
func getKubeadmConfigMap() *corev1.ConfigMap {
|
||||
ccData := fmt.Sprintf(`apiServer: {}
|
||||
apiVersion: kubeadm.k8s.io/v1beta4
|
||||
caCertificateValidityPeriod: 87600h0m0s
|
||||
certificateValidityPeriod: 100h0m0s
|
||||
certificatesDir: /etc/kubernetes/pki
|
||||
clusterName: kubernetes
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
- name: cluster-signing-duration
|
||||
value: 24h
|
||||
dns: {}
|
||||
encryptionAlgorithm: RSA-2048
|
||||
etcd:
|
||||
local:
|
||||
dataDir: /var/lib/etcd
|
||||
imageRepository: registry.k8s.io
|
||||
kind: ClusterConfiguration
|
||||
kubernetesVersion: %s
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: 192.168.0.0/16
|
||||
serviceSubnet: 10.96.0.0/12
|
||||
proxy: {}
|
||||
scheduler: {}
|
||||
`, constants.MinimumControlPlaneVersion)
|
||||
|
||||
data := map[string]string{
|
||||
constants.ClusterConfigurationKind: ccData,
|
||||
}
|
||||
return getConfigMap(metav1.NamespaceSystem, constants.KubeadmConfigConfigMap, data)
|
||||
}
|
||||
|
||||
// getKubeletConfigMap returns a fake "kubelet-config" ConfigMap.
|
||||
func getKubeletConfigMap() *corev1.ConfigMap {
|
||||
configData := `apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
authentication:
|
||||
anonymous:
|
||||
enabled: false
|
||||
webhook:
|
||||
enabled: true
|
||||
x509:
|
||||
clientCAFile: /etc/kubernetes/pki/ca.crt
|
||||
authorization:
|
||||
mode: Webhook
|
||||
cgroupDriver: systemd
|
||||
clusterDNS:
|
||||
- 10.96.0.10
|
||||
clusterDomain: cluster.local
|
||||
healthzBindAddress: 127.0.0.1
|
||||
healthzPort: 10248
|
||||
kind: KubeletConfiguration
|
||||
memorySwap: {}
|
||||
resolvConf: /run/systemd/resolve/resolv.conf
|
||||
rotateCertificates: true
|
||||
staticPodPath: /etc/kubernetes/manifests
|
||||
`
|
||||
data := map[string]string{
|
||||
constants.KubeletBaseConfigurationConfigMapKey: configData,
|
||||
}
|
||||
return getConfigMap(metav1.NamespaceSystem, constants.KubeletBaseConfigurationConfigMap, data)
|
||||
}
|
||||
|
||||
// getKubeProxyConfigMap returns a fake "kube-proxy" ConfigMap.
|
||||
func getKubeProxyConfigMap() *corev1.ConfigMap {
|
||||
configData := `apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
bindAddress: 0.0.0.0
|
||||
bindAddressHardFail: false
|
||||
clientConnection:
|
||||
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
|
||||
clusterCIDR: 192.168.0.0/16
|
||||
kind: KubeProxyConfiguration
|
||||
`
|
||||
kubeconfigData := `apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
server: https://192.168.0.101:6443
|
||||
name: default
|
||||
contexts:
|
||||
- context:
|
||||
cluster: default
|
||||
namespace: default
|
||||
user: default
|
||||
name: default
|
||||
current-context: default
|
||||
users:
|
||||
- name: default
|
||||
user:
|
||||
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
`
|
||||
data := map[string]string{
|
||||
constants.KubeProxyConfigMapKey: configData,
|
||||
"kubeconfig.conf": kubeconfigData,
|
||||
}
|
||||
return getConfigMap(metav1.NamespaceSystem, constants.KubeProxyConfigMap, data)
|
||||
}
|
436 cmd/kubeadm/app/util/apiclient/dryrun_test.go Normal file
@@ -0,0 +1,436 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clienttesting "k8s.io/client-go/testing"
|
||||
|
||||
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
)
|
||||
|
||||
func TestNewDryRunWithKubeConfigFile(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "dryrun-test")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to create temporary directory: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tmpDir)
|
||||
}()
|
||||
|
||||
kubeconfig := kubeconfigphase.CreateWithToken(
|
||||
"some-server:6443",
|
||||
"cluster-foo",
|
||||
"user-bar",
|
||||
[]byte("fake-ca-cert"),
|
||||
"fake-token",
|
||||
)
|
||||
path := filepath.Join(tmpDir, "some-file")
|
||||
if err := kubeconfigphase.WriteToDisk(path, kubeconfig); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d := NewDryRun()
|
||||
if err := d.WithKubeConfigFile(path); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if d.FakeClient() == nil {
|
||||
t.Fatal("expected fakeClient to be non-nil")
|
||||
}
|
||||
if d.Client() == nil {
|
||||
t.Fatal("expected client to be non-nil")
|
||||
}
|
||||
if d.DynamicClient() == nil {
|
||||
t.Fatal("expected dynamicClient to be non-nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrependAppendReactor(t *testing.T) {
|
||||
foo := &clienttesting.SimpleReactor{Verb: "foo"}
|
||||
bar := &clienttesting.SimpleReactor{Verb: "bar"}
|
||||
baz := &clienttesting.SimpleReactor{Verb: "baz"}
|
||||
qux := &clienttesting.SimpleReactor{Verb: "qux"}
|
||||
|
||||
d := NewDryRun()
|
||||
lenBefore := len(d.fakeClient.Fake.ReactionChain)
|
||||
d.PrependReactor(foo).PrependReactor(bar).
|
||||
AppendReactor(baz).AppendReactor(qux)
|
||||
|
||||
// [ log, bar, foo, get, list, baz, qux, default ]
|
||||
// 1 2 5 6
|
||||
expectedIdx := map[string]int{
|
||||
foo.Verb: 2,
|
||||
bar.Verb: 1,
|
||||
baz.Verb: 5,
|
||||
qux.Verb: 6,
|
||||
}
|
||||
expectedLen := lenBefore + len(expectedIdx)
|
||||
|
||||
if len(d.fakeClient.Fake.ReactionChain) != expectedLen {
|
||||
t.Fatalf("expected len of reactor chain: %d, got: %d",
|
||||
expectedLen, len(d.fakeClient.Fake.ReactionChain))
|
||||
}
|
||||
|
||||
for actual, r := range d.fakeClient.Fake.ReactionChain {
|
||||
s := r.(*clienttesting.SimpleReactor)
|
||||
expected, exists := expectedIdx[s.Verb]
|
||||
if exists {
|
||||
delete(expectedIdx, s.Verb)
|
||||
if actual != expected {
|
||||
t.Errorf("expected idx for verb %q: %d, got %d", s.Verb, expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(expectedIdx) != 0 {
|
||||
t.Fatalf("expected len of exists map to be 0 after iteration, got: %d", len(expectedIdx))
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactors(t *testing.T) {
|
||||
type apiCallCase struct {
|
||||
name string
|
||||
namespace string
|
||||
expectedError bool
|
||||
}
|
||||
ctx := context.Background()
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(d *DryRun)
|
||||
apiCall func(d *DryRun, namespace, name string) error
|
||||
apiCallCases []apiCallCase
|
||||
}{
|
||||
{
|
||||
name: "HealthCheckJobReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.HealthCheckJobReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
obj, err := d.FakeClient().BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff := cmp.Diff(getJob(name, namespace), obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "upgrade-health-check",
|
||||
namespace: metav1.NamespaceSystem,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "PatchNodeReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.PatchNodeReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, _, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().Nodes().Patch(ctx, name, "", []byte{}, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff := cmp.Diff(getNode(name), obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "some-node",
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "GetNodeReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.GetNodeReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, _, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff := cmp.Diff(getNode(name), obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "some-node",
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "GetClusterInfoReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.GetClusterInfoReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expectedObj := getClusterInfoConfigMap()
|
||||
if diff := cmp.Diff(expectedObj, obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "cluster-info",
|
||||
namespace: metav1.NamespacePublic,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "GetKubeadmConfigReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.GetKubeadmConfigReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expectedObj := getKubeadmConfigMap()
|
||||
if diff := cmp.Diff(expectedObj, obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "kubeadm-config",
|
||||
namespace: metav1.NamespaceSystem,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "GetKubeletConfigReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.GetKubeletConfigReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expectedObj := getKubeletConfigMap()
|
||||
if diff := cmp.Diff(expectedObj, obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "kubelet-config",
|
||||
namespace: metav1.NamespaceSystem,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "GetKubeProxyConfigReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.GetKubeProxyConfigReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
obj, err := d.FakeClient().CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expectedObj := getKubeProxyConfigMap()
|
||||
if diff := cmp.Diff(expectedObj, obj); diff != "" {
|
||||
return errors.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "kube-proxy",
|
||||
namespace: metav1.NamespaceSystem,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DeleteBootstrapTokenReactor",
|
||||
setup: func(d *DryRun) {
|
||||
d.PrependReactor((d.DeleteBootstrapTokenReactor()))
|
||||
},
|
||||
apiCall: func(d *DryRun, namespace, name string) error {
|
||||
err := d.FakeClient().CoreV1().Secrets(namespace).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
apiCallCases: []apiCallCase{
|
||||
{
|
||||
name: "foo",
|
||||
namespace: "bar",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "bootstrap-token-foo",
|
||||
namespace: metav1.NamespaceSystem,
|
||||
expectedError: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
d := NewDryRun().WithDefaultMarshalFunction().WithWriter(io.Discard)
|
||||
tc.setup(d)
|
||||
for _, ac := range tc.apiCallCases {
|
||||
if err := tc.apiCall(d, ac.namespace, ac.name); (err != nil) != ac.expectedError {
|
||||
t.Errorf("expected error: %v, got: %v, error: %v", ac.expectedError, err != nil, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeUnstructuredIntoAPIObject(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
action clienttesting.Action
|
||||
unstructured runtime.Unstructured
|
||||
expectedObj runtime.Object
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "valid: ConfigMap is decoded",
|
||||
action: clienttesting.NewGetAction(
|
||||
schema.GroupVersionResource{
|
||||
Group: "",
|
||||
Version: "v1",
|
||||
Resource: "configmaps",
|
||||
},
|
||||
metav1.NamespaceSystem,
|
||||
"kubeadm-config",
|
||||
),
|
||||
unstructured: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"metadata": map[string]interface{}{
|
||||
"namespace": "foo",
|
||||
"name": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedObj: &corev1.ConfigMap{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "ConfigMap",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "foo",
|
||||
Name: "bar",
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: unknown GVR cannot be decoded",
|
||||
action: clienttesting.NewGetAction(
|
||||
schema.GroupVersionResource{
|
||||
Group: "foo",
|
||||
Version: "bar",
|
||||
Resource: "baz",
|
||||
},
|
||||
"some-ns",
|
||||
"baz01",
|
||||
),
|
||||
unstructured: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "foo/bar",
|
||||
"kind": "baz",
|
||||
"metadata": map[string]interface{}{
|
||||
"namespace": "some-ns",
|
||||
"name": "baz01",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
d := NewDryRun().WithDefaultMarshalFunction().WithWriter(io.Discard)
|
||||
obj, err := d.decodeUnstructuredIntoAPIObject(tc.action, tc.unstructured)
|
||||
if (err != nil) != tc.expectedError {
|
||||
t.Errorf("expected error: %v, got: %v, error: %v", tc.expectedError, err != nil, err)
|
||||
}
|
||||
if diff := cmp.Diff(tc.expectedObj, obj); diff != "" {
|
||||
t.Errorf("object differs (-want,+got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,268 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
|
||||
|
||||
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
|
||||
)
|
||||
|
||||
// DryRunGetter is an interface that must be supplied to the NewDryRunClient function in order to construct a fully functional fake dryrun clientset
|
||||
type DryRunGetter interface {
|
||||
HandleGetAction(core.GetAction) (bool, runtime.Object, error)
|
||||
HandleListAction(core.ListAction) (bool, runtime.Object, error)
|
||||
}
|
||||
|
||||
// MarshalFunc takes care of converting any object to a byte array for displaying the object to the user
|
||||
type MarshalFunc func(runtime.Object, schema.GroupVersion) ([]byte, error)
|
||||
|
||||
// DefaultMarshalFunc is the default MarshalFunc used; uses YAML to print objects to the user
|
||||
func DefaultMarshalFunc(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) {
|
||||
return kubeadmutil.MarshalToYaml(obj, gv)
|
||||
}
|
||||
|
||||
// DryRunClientOptions specifies options to pass to NewDryRunClientWithOpts in order to get a dryrun clientset
|
||||
type DryRunClientOptions struct {
|
||||
Writer io.Writer
|
||||
Getter DryRunGetter
|
||||
PrependReactors []core.Reactor
|
||||
AppendReactors []core.Reactor
|
||||
MarshalFunc MarshalFunc
|
||||
PrintGETAndLIST bool
|
||||
}
|
||||
|
||||
// GetDefaultDryRunClientOptions returns the default DryRunClientOptions values
|
||||
func GetDefaultDryRunClientOptions(drg DryRunGetter, w io.Writer) DryRunClientOptions {
|
||||
return DryRunClientOptions{
|
||||
Writer: w,
|
||||
Getter: drg,
|
||||
PrependReactors: []core.Reactor{},
|
||||
AppendReactors: []core.Reactor{},
|
||||
MarshalFunc: DefaultMarshalFunc,
|
||||
PrintGETAndLIST: false,
|
||||
}
|
||||
}
|
||||
|
||||
// actionWithName is the generic interface for an action that has a name associated with it
|
||||
// This just makes it easier to catch all actions that has a name; instead of hard-coding all request that has it associated
|
||||
type actionWithName interface {
|
||||
core.Action
|
||||
GetName() string
|
||||
}
|
||||
|
||||
// actionWithObject is the generic interface for an action that has an object associated with it
|
||||
// This just makes it easier to catch all actions that has an object; instead of hard-coding all request that has it associated
|
||||
type actionWithObject interface {
|
||||
core.Action
|
||||
GetObject() runtime.Object
|
||||
}
|
||||
|
||||
// NewDryRunClient is a wrapper for NewDryRunClientWithOpts using some default values
|
||||
func NewDryRunClient(drg DryRunGetter, w io.Writer) clientset.Interface {
|
||||
return NewDryRunClientWithOpts(GetDefaultDryRunClientOptions(drg, w))
|
||||
}
|
||||
|
||||
// NewDryRunClientWithOpts returns a clientset.Interface that can be used normally for talking to the Kubernetes API.
|
||||
// This client doesn't apply changes to the backend. The client gets GET/LIST values from the DryRunGetter implementation.
|
||||
// This client logs all I/O to the writer w in YAML format
|
||||
func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface {
|
||||
// Build a chain of reactors to act like a normal clientset; but log everything that is happening and don't change any state
|
||||
client := fakeclientset.NewSimpleClientset()
|
||||
|
||||
// Build the chain of reactors. Order matters; first item here will be invoked first on match, then the second one will be evaluated, etc.
|
||||
defaultReactorChain := []core.Reactor{
|
||||
// Log everything that happens. Default the object if it's about to be created/updated so that the logged object is representative.
|
||||
&core.SimpleReactor{
|
||||
Verb: "*",
|
||||
Resource: "*",
|
||||
Reaction: func(action core.Action) (bool, runtime.Object, error) {
|
||||
logDryRunAction(action, opts.Writer, opts.MarshalFunc)
|
||||
|
||||
return false, nil, nil
|
||||
},
|
||||
},
|
||||
// Let the DryRunGetter implementation take care of all GET requests.
|
||||
// The DryRunGetter implementation may call a real API Server behind the scenes or just fake everything
|
||||
&core.SimpleReactor{
|
||||
Verb: "get",
|
||||
Resource: "*",
|
||||
Reaction: func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction, ok := action.(core.GetAction)
|
||||
if !ok {
|
||||
// If the GetAction cast fails, this could be an ActionImpl with a "get" verb.
|
||||
// Such actions could be invoked from any of the fake discovery calls, such as ServerVersion().
|
||||
// Attempt the cast to ActionImpl and construct a GetActionImpl from it.
|
||||
actionImpl, ok := action.(core.ActionImpl)
|
||||
if ok {
|
||||
getAction = core.GetActionImpl{ActionImpl: actionImpl}
|
||||
} else {
|
||||
// something's wrong, we can't handle this event
|
||||
return true, nil, errors.New("can't cast get reactor event action object to GetAction interface")
|
||||
}
|
||||
}
|
||||
handled, obj, err := opts.Getter.HandleGetAction(getAction)
|
||||
|
||||
if opts.PrintGETAndLIST {
|
||||
// Print the marshalled object format with one tab indentation
|
||||
objBytes, err := opts.MarshalFunc(obj, action.GetResource().GroupVersion())
|
||||
if err == nil {
|
||||
fmt.Println("[dryrun] Returning faked GET response:")
|
||||
PrintBytesWithLinePrefix(opts.Writer, objBytes, "\t")
|
||||
}
|
||||
}
|
||||
|
||||
return handled, obj, err
|
||||
},
|
||||
},
|
||||
// Let the DryRunGetter implementation take care of all GET requests.
|
||||
// The DryRunGetter implementation may call a real API Server behind the scenes or just fake everything
|
||||
&core.SimpleReactor{
|
||||
Verb: "list",
|
||||
Resource: "*",
|
||||
Reaction: func(action core.Action) (bool, runtime.Object, error) {
|
||||
listAction, ok := action.(core.ListAction)
|
||||
if !ok {
|
||||
// something's wrong, we can't handle this event
|
||||
return true, nil, errors.New("can't cast list reactor event action object to ListAction interface")
|
||||
}
|
||||
handled, objs, err := opts.Getter.HandleListAction(listAction)
|
||||
|
||||
if opts.PrintGETAndLIST {
|
||||
// Print the marshalled object format with one tab indentation
|
||||
objBytes, err := opts.MarshalFunc(objs, action.GetResource().GroupVersion())
|
||||
if err == nil {
|
||||
fmt.Println("[dryrun] Returning faked LIST response:")
|
||||
PrintBytesWithLinePrefix(opts.Writer, objBytes, "\t")
|
||||
}
|
||||
}
|
||||
|
||||
return handled, objs, err
|
||||
},
|
||||
},
|
||||
// For the verbs that modify anything on the server; just return the object if present and exit successfully
|
||||
&core.SimpleReactor{
|
||||
Verb: "create",
|
||||
Resource: "*",
|
||||
Reaction: func(action core.Action) (bool, runtime.Object, error) {
|
||||
objAction, ok := action.(actionWithObject)
|
||||
if obj := objAction.GetObject(); ok && obj != nil {
|
||||
if secret, ok := obj.(*v1.Secret); ok {
|
||||
if secret.Namespace == metav1.NamespaceSystem && strings.HasPrefix(secret.Name, bootstrapapi.BootstrapTokenSecretPrefix) {
|
||||
// bypass bootstrap token secret create event so that it can be persisted to the backing data store
|
||||
// this secret should be readable during the uploadcerts init phase if it has already been created
|
||||
return false, nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return successfulModificationReactorFunc(action)
|
||||
},
|
||||
},
|
||||
&core.SimpleReactor{
|
||||
Verb: "update",
|
||||
Resource: "*",
|
||||
Reaction: successfulModificationReactorFunc,
|
||||
},
|
||||
&core.SimpleReactor{
|
||||
Verb: "delete",
|
||||
Resource: "*",
|
||||
Reaction: successfulModificationReactorFunc,
|
||||
},
|
||||
&core.SimpleReactor{
|
||||
Verb: "delete-collection",
|
||||
Resource: "*",
|
||||
Reaction: successfulModificationReactorFunc,
|
||||
},
|
||||
&core.SimpleReactor{
|
||||
Verb: "patch",
|
||||
Resource: "*",
|
||||
Reaction: successfulModificationReactorFunc,
|
||||
},
|
||||
}
|
||||
|
||||
// The chain of reactors will look like this:
|
||||
// opts.PrependReactors | defaultReactorChain | opts.AppendReactors | client.Fake.ReactionChain (default reactors for the fake clientset)
|
||||
fullReactorChain := append(opts.PrependReactors, defaultReactorChain...)
|
||||
fullReactorChain = append(fullReactorChain, opts.AppendReactors...)
|
||||
|
||||
// Prepend the reaction chain with our reactors. Important, these MUST be prepended; not appended due to how the fake clientset works by default
|
||||
client.Fake.ReactionChain = append(fullReactorChain, client.Fake.ReactionChain...)
|
||||
return client
|
||||
}
|
||||
|
||||
// successfulModificationReactorFunc is a no-op that just returns the POSTed/PUTed value if present; but does nothing to edit any backing data store.
|
||||
func successfulModificationReactorFunc(action core.Action) (bool, runtime.Object, error) {
|
||||
objAction, ok := action.(actionWithObject)
|
||||
if ok {
|
||||
return true, objAction.GetObject(), nil
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
// logDryRunAction logs the action that was recorded by the "catch-all" (*,*) reactor and tells the user what would have happened in an user-friendly way
|
||||
func logDryRunAction(action core.Action, w io.Writer, marshalFunc MarshalFunc) {
|
||||
|
||||
group := action.GetResource().Group
|
||||
if len(group) == 0 {
|
||||
group = "core"
|
||||
}
|
||||
fmt.Fprintf(w, "[dryrun] Would perform action %s on resource %q in API group \"%s/%s\"\n", strings.ToUpper(action.GetVerb()), action.GetResource().Resource, group, action.GetResource().Version)
|
||||
|
||||
namedAction, ok := action.(actionWithName)
|
||||
if ok {
|
||||
fmt.Fprintf(w, "[dryrun] Resource name: %q\n", namedAction.GetName())
|
||||
}
|
||||
|
||||
objAction, ok := action.(actionWithObject)
|
||||
if ok && objAction.GetObject() != nil {
|
||||
// Print the marshalled object with a tab indentation
|
||||
objBytes, err := marshalFunc(objAction.GetObject(), action.GetResource().GroupVersion())
|
||||
if err == nil {
|
||||
fmt.Println("[dryrun] Attached object:")
|
||||
PrintBytesWithLinePrefix(w, objBytes, "\t")
|
||||
}
|
||||
}
|
||||
|
||||
patchAction, ok := action.(core.PatchAction)
|
||||
if ok {
|
||||
// Replace all occurrences of \" with a simple " when printing
|
||||
fmt.Fprintf(w, "[dryrun] Attached patch:\n\t%s\n", strings.Replace(string(patchAction.GetPatch()), `\"`, `"`, -1))
|
||||
}
|
||||
}
|
||||
|
||||
// PrintBytesWithLinePrefix prints objBytes to writer w with linePrefix in the beginning of every line
|
||||
func PrintBytesWithLinePrefix(w io.Writer, objBytes []byte, linePrefix string) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(objBytes))
|
||||
for scanner.Scan() {
|
||||
fmt.Fprintf(w, "%s%s\n", linePrefix, scanner.Text())
|
||||
}
|
||||
}
|
@@ -1,135 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiclient

import (
	"bytes"
	"io"
	"testing"

	v1 "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	pkgversion "k8s.io/apimachinery/pkg/version"
	fakediscovery "k8s.io/client-go/discovery/fake"
	core "k8s.io/client-go/testing"
)

func TestLogDryRunAction(t *testing.T) {
	var tests = []struct {
		name          string
		action        core.Action
		expectedBytes []byte
		buf           *bytes.Buffer
	}{
		{
			name:   "action GET on services",
			action: core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, "default", "kubernetes"),
			expectedBytes: []byte(`[dryrun] Would perform action GET on resource "services" in API group "core/v1"
[dryrun] Resource name: "kubernetes"
`),
		},
		{
			name:   "action GET on clusterrolebindings",
			action: core.NewRootGetAction(schema.GroupVersionResource{Group: rbac.GroupName, Version: rbac.SchemeGroupVersion.Version, Resource: "clusterrolebindings"}, "system:node"),
			expectedBytes: []byte(`[dryrun] Would perform action GET on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Resource name: "system:node"
`),
		},
		{
			name:   "action LIST on services",
			action: core.NewListAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, schema.GroupVersionKind{Version: "v1", Kind: "Service"}, "default", metav1.ListOptions{}),
			expectedBytes: []byte(`[dryrun] Would perform action LIST on resource "services" in API group "core/v1"
`),
		},
		{
			name: "action CREATE on services",
			action: core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, "default", &v1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo",
				},
				Spec: v1.ServiceSpec{
					ClusterIP: "1.1.1.1",
				},
			}),
			expectedBytes: []byte(`[dryrun] Would perform action CREATE on resource "services" in API group "core/v1"
	apiVersion: v1
	kind: Service
	metadata:
	  creationTimestamp: null
	  name: foo
	spec:
	  clusterIP: 1.1.1.1
	status:
	  loadBalancer: {}
`),
		},
		{
			name:   "action PATCH on nodes",
			action: core.NewPatchAction(schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "default", "my-node", "application/strategic-merge-patch+json", []byte(`{"spec":{"taints":[{"key": "foo", "value": "bar"}]}}`)),
			expectedBytes: []byte(`[dryrun] Would perform action PATCH on resource "nodes" in API group "core/v1"
[dryrun] Resource name: "my-node"
[dryrun] Attached patch:
	{"spec":{"taints":[{"key": "foo", "value": "bar"}]}}
`),
		},
		{
			name:   "action DELETE on pods",
			action: core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "pods"}, "default", "my-pod"),
			expectedBytes: []byte(`[dryrun] Would perform action DELETE on resource "pods" in API group "core/v1"
[dryrun] Resource name: "my-pod"
`),
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			rt.buf = bytes.NewBufferString("")
			logDryRunAction(rt.action, rt.buf, DefaultMarshalFunc)
			actualBytes := rt.buf.Bytes()

			if !bytes.Equal(actualBytes, rt.expectedBytes) {
				t.Errorf(
					"failed LogDryRunAction:\n\texpected bytes: %q\n\t actual: %q",
					rt.expectedBytes,
					actualBytes,
				)
			}
		})
	}
}

func TestDiscoveryServerVersion(t *testing.T) {
	dryRunGetter := &InitDryRunGetter{
		controlPlaneName: "controlPlane",
		serviceSubnet:    "serviceSubnet",
	}
	c := NewDryRunClient(dryRunGetter, io.Discard)
	fakeclientDiscovery, ok := c.Discovery().(*fakediscovery.FakeDiscovery)
	if !ok {
		t.Fatal("could not obtain FakeDiscovery from dry run client")
	}
	const gitVersion = "foo"
	fakeclientDiscovery.FakedServerVersion = &pkgversion.Info{GitVersion: gitVersion}
	ver, err := c.Discovery().ServerVersion()
	if err != nil {
		t.Fatalf("Get ServerVersion failed.: %v", err)
	}
	if ver.GitVersion != gitVersion {
		t.Fatalf("GitVersion did not match, expected %s, got %s", gitVersion, ver.GitVersion)
	}
}
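For illustration only (not part of this diff): the discovery-faking pattern that TestDiscoveryServerVersion exercises above also works on a plain fake clientset, which is how a dry-run flow can answer server-version probes without a real API server. The version string below is a placeholder.

package main

import (
	"fmt"

	pkgversion "k8s.io/apimachinery/pkg/version"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// The fake clientset's Discovery() is backed by *fakediscovery.FakeDiscovery,
	// so it can be given a canned server version.
	fakeDisc, ok := client.Discovery().(*fakediscovery.FakeDiscovery)
	if !ok {
		panic("expected a FakeDiscovery")
	}
	fakeDisc.FakedServerVersion = &pkgversion.Info{GitVersion: "v1.31.0"}

	ver, err := client.Discovery().ServerVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println(ver.GitVersion) // prints the faked version
}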
@ -1,146 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiclient

import (
	"github.com/pkg/errors"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	core "k8s.io/client-go/testing"
	netutils "k8s.io/utils/net"

	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// InitDryRunGetter implements the DryRunGetter interface and can be used to GET/LIST values in the dryrun fake clientset
// Need to handle these routes in a special manner:
// - GET /default/services/kubernetes -- must return a valid Service
// - GET /clusterrolebindings/system:nodes -- can safely return a NotFound error
// - GET /nodes/<node-name> -- must return a valid Node
// - ...all other, unknown GETs/LISTs will be logged
type InitDryRunGetter struct {
	controlPlaneName string
	serviceSubnet    string
}

// InitDryRunGetter should implement the DryRunGetter interface
var _ DryRunGetter = &InitDryRunGetter{}

// NewInitDryRunGetter creates a new instance of the InitDryRunGetter struct
func NewInitDryRunGetter(controlPlaneName string, serviceSubnet string) *InitDryRunGetter {
	return &InitDryRunGetter{
		controlPlaneName: controlPlaneName,
		serviceSubnet:    serviceSubnet,
	}
}

// HandleGetAction handles GET actions to the dryrun clientset this interface supports
func (idr *InitDryRunGetter) HandleGetAction(action core.GetAction) (bool, runtime.Object, error) {
	funcs := []func(core.GetAction) (bool, runtime.Object, error){
		idr.handleKubernetesService,
		idr.handleGetNode,
		idr.handleSystemNodesClusterRoleBinding,
	}
	for _, f := range funcs {
		handled, obj, err := f(action)
		if handled {
			return handled, obj, err
		}
	}

	return false, nil, nil
}

// HandleListAction handles LIST actions to the dryrun clientset this interface supports.
// Currently there are no known LIST calls during kubeadm init that this code has to take care of.
func (idr *InitDryRunGetter) HandleListAction(action core.ListAction) (bool, runtime.Object, error) {
	return false, nil, nil
}

// handleKubernetesService returns a faked Kubernetes service in order to be able to continue running kubeadm init.
// The CoreDNS addon code GETs the Kubernetes service in order to extract the service subnet
func (idr *InitDryRunGetter) handleKubernetesService(action core.GetAction) (bool, runtime.Object, error) {
	if action.GetName() != "kubernetes" || action.GetNamespace() != metav1.NamespaceDefault || action.GetResource().Resource != "services" {
		// We can't handle this event
		return false, nil, nil
	}

	_, svcSubnet, err := netutils.ParseCIDRSloppy(idr.serviceSubnet)
	if err != nil {
		return true, nil, errors.Wrapf(err, "error parsing CIDR %q", idr.serviceSubnet)
	}

	internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
	if err != nil {
		return true, nil, errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", svcSubnet.String())
	}

	// The only used field of this Service object is the ClusterIP, which CoreDNS uses to calculate its own IP
	return true, &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kubernetes",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				"component": "apiserver",
				"provider":  "kubernetes",
			},
		},
		Spec: v1.ServiceSpec{
			ClusterIP: internalAPIServerVirtualIP.String(),
			Ports: []v1.ServicePort{
				{
					Name:       "https",
					Port:       443,
					TargetPort: intstr.FromInt32(6443),
				},
			},
		},
	}, nil
}

// handleGetNode returns a fake node object for the purpose of moving kubeadm init forwards.
func (idr *InitDryRunGetter) handleGetNode(action core.GetAction) (bool, runtime.Object, error) {
	if action.GetName() != idr.controlPlaneName || action.GetResource().Resource != "nodes" {
		// We can't handle this event
		return false, nil, nil
	}

	return true, &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: idr.controlPlaneName,
			Labels: map[string]string{
				"kubernetes.io/hostname": idr.controlPlaneName,
			},
			Annotations: map[string]string{},
		},
	}, nil
}

// handleSystemNodesClusterRoleBinding handles the GET call to the system:nodes clusterrolebinding
func (idr *InitDryRunGetter) handleSystemNodesClusterRoleBinding(action core.GetAction) (bool, runtime.Object, error) {
	if action.GetName() != constants.NodesClusterRoleBinding || action.GetResource().Resource != "clusterrolebindings" {
		// We can't handle this event
		return false, nil, nil
	}
	// We can safely return a NotFound error here as the code will just proceed normally and doesn't care about modifying this clusterrolebinding
	// This can only happen on an upgrade; and in that case the ClientBackedDryRunGetter impl will be used
	return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), "clusterrolebinding not found")
}
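For illustration only (not part of this diff): the subnet arithmetic handleKubernetesService relies on, shown standalone; the subnet value is a placeholder.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// Parse the service subnet, tolerating sloppy notation such as a host bit being set.
	_, svcSubnet, err := netutils.ParseCIDRSloppy("10.96.0.0/12")
	if err != nil {
		panic(err)
	}
	// Index 1 of the masked subnet is the conventional ClusterIP of the "kubernetes" Service.
	vip, err := netutils.GetIndexedIP(svcSubnet, 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(vip.String()) // 10.96.0.1
}

That ClusterIP is the only field of the faked Service that the CoreDNS addon code reads back, which is why the handler above can return such a sparse object.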
@ -1,123 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiclient

import (
	"bytes"
	"encoding/json"
	"testing"

	rbac "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	core "k8s.io/client-go/testing"
)

func TestHandleGetAction(t *testing.T) {
	controlPlaneName := "control-plane-foo"
	serviceSubnet := "10.96.0.1/12"
	idr := NewInitDryRunGetter(controlPlaneName, serviceSubnet)

	var tests = []struct {
		name               string
		action             core.GetActionImpl
		expectedHandled    bool
		expectedObjectJSON []byte
		expectedErr        bool
	}{
		{
			name:               "get default services",
			action:             core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, "default", "kubernetes"),
			expectedHandled:    true,
			expectedObjectJSON: []byte(`{"metadata":{"name":"kubernetes","namespace":"default","creationTimestamp":null,"labels":{"component":"apiserver","provider":"kubernetes"}},"spec":{"ports":[{"name":"https","port":443,"targetPort":6443}],"clusterIP":"10.96.0.1"},"status":{"loadBalancer":{}}}`),
			expectedErr:        false,
		},
		{
			name:               "get nodes",
			action:             core.NewRootGetAction(schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, controlPlaneName),
			expectedHandled:    true,
			expectedObjectJSON: []byte(`{"metadata":{"name":"control-plane-foo","creationTimestamp":null,"labels":{"kubernetes.io/hostname":"control-plane-foo"}},"spec":{},"status":{"daemonEndpoints":{"kubeletEndpoint":{"Port":0}},"nodeInfo":{"machineID":"","systemUUID":"","bootID":"","kernelVersion":"","osImage":"","containerRuntimeVersion":"","kubeletVersion":"","kubeProxyVersion":"","operatingSystem":"","architecture":""}}}`),
			expectedErr:        false,
		},
		{
			name:               "get clusterrolebindings",
			action:             core.NewRootGetAction(schema.GroupVersionResource{Group: rbac.GroupName, Version: rbac.SchemeGroupVersion.Version, Resource: "clusterrolebindings"}, "system:node"),
			expectedHandled:    true,
			expectedObjectJSON: []byte(``),
			expectedErr:        true, // we expect a NotFound error here
		},
		{ // an ask for a kubernetes service in the _kube-system_ ns should not be answered
			name:               "get kube-system services",
			action:             core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, "kube-system", "kubernetes"),
			expectedHandled:    false,
			expectedObjectJSON: []byte(``),
			expectedErr:        false,
		},
		{ // an ask for another service than kubernetes should not be answered
			name:               "get default my-other-service",
			action:             core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "services"}, "default", "my-other-service"),
			expectedHandled:    false,
			expectedObjectJSON: []byte(``),
			expectedErr:        false,
		},
		{ // an ask for another node than the control-plane should not be answered
			name:               "get other-node",
			action:             core.NewRootGetAction(schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "other-node"),
			expectedHandled:    false,
			expectedObjectJSON: []byte(``),
			expectedErr:        false,
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			handled, obj, actualErr := idr.HandleGetAction(rt.action)
			objBytes := []byte(``)
			if obj != nil {
				var err error
				objBytes, err = json.Marshal(obj)
				if err != nil {
					t.Fatalf("couldn't marshal returned object")
				}
			}

			if handled != rt.expectedHandled {
				t.Errorf(
					"failed HandleGetAction:\n\texpected handled: %t\n\t actual: %t %v",
					rt.expectedHandled,
					handled,
					rt.action,
				)
			}

			if !bytes.Equal(objBytes, rt.expectedObjectJSON) {
				t.Errorf(
					"failed HandleGetAction:\n\texpected object: %q\n\t actual: %q",
					rt.expectedObjectJSON,
					objBytes,
				)
			}

			if (actualErr != nil) != rt.expectedErr {
				t.Errorf(
					"failed HandleGetAction:\n\texpected error: %t\n\t actual: %t %v",
					rt.expectedErr,
					(actualErr != nil),
					rt.action,
				)
			}
		})
	}
}
@ -76,7 +76,7 @@ func PrintDryRunFiles(files []FileToPrint, w io.Writer) error {
		}

		fmt.Fprintf(w, "[dryrun] Would write file %q with content:\n", outputFilePath)
		apiclient.PrintBytesWithLinePrefix(w, fileBytes, "\t")
		fmt.Fprintf(w, "%s", fileBytes)
	}
	return errorsutil.NewAggregate(errs)
}
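For illustration only (not part of this diff): a rough sketch contrasting the two output styles in the hunk above; printBytesWithLinePrefix mirrors the exported helper shown earlier in this diff, and the file content is a placeholder.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
)

// printBytesWithLinePrefix writes objBytes to w with linePrefix at the start of every line.
func printBytesWithLinePrefix(w io.Writer, objBytes []byte, linePrefix string) {
	scanner := bufio.NewScanner(bytes.NewReader(objBytes))
	for scanner.Scan() {
		fmt.Fprintf(w, "%s%s\n", linePrefix, scanner.Text())
	}
}

func main() {
	fileBytes := []byte("apiVersion: kubeadm.k8s.io/v1beta4\nkind: ClusterConfiguration\n")

	printBytesWithLinePrefix(os.Stdout, fileBytes, "\t") // old style: every line tab-indented
	fmt.Fprintf(os.Stdout, "%s", fileBytes)              // new style: file content written verbatim
}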
@ -79,7 +79,7 @@ func TestPrintDryRunFiles(t *testing.T) {
				},
			},
			wantW: "[dryrun] Would write file \"" + cfgPath + "\" with content:\n" +
				"	apiVersion: kubeadm.k8s.io/unknownVersion\n",
				"apiVersion: kubeadm.k8s.io/unknownVersion",
			wantErr: false,
		},
	}
@ -91,7 +91,7 @@ func TestPrintDryRunFiles(t *testing.T) {
				return
			}
			if gotW := w.String(); gotW != tt.wantW {
				t.Errorf("output: %v, expected output: %v", gotW, tt.wantW)
				t.Errorf("\noutput: %q\nexpected output: %q", gotW, tt.wantW)
			}
		})
	}