From 6c6a702a99d0e38b69395b126c0b4e55b908210f Mon Sep 17 00:00:00 2001 From: Xianglin Gao Date: Wed, 6 May 2020 11:36:15 +0800 Subject: [PATCH 1/2] kubeadm: delete prepull ds Signed-off-by: Xianglin Gao --- cmd/kubeadm/app/cmd/upgrade/apply.go | 32 +-- cmd/kubeadm/app/constants/constants.go | 2 - cmd/kubeadm/app/phases/upgrade/BUILD | 2 - cmd/kubeadm/app/phases/upgrade/prepull.go | 213 ------------------ .../app/phases/upgrade/prepull_test.go | 154 ------------- 5 files changed, 4 insertions(+), 399 deletions(-) delete mode 100644 cmd/kubeadm/app/phases/upgrade/prepull.go delete mode 100644 cmd/kubeadm/app/phases/upgrade/prepull_test.go diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 671115cfae1..b9ab8bc5fe4 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -18,7 +18,6 @@ package upgrade import ( "fmt" - "time" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -27,7 +26,6 @@ import ( "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" @@ -35,10 +33,6 @@ import ( configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" ) -const ( - defaultImagePullTimeout = 15 * time.Minute -) - // applyFlags holds the information about the flags that can be passed to apply type applyFlags struct { *applyPlanFlags @@ -48,7 +42,6 @@ type applyFlags struct { dryRun bool etcdUpgrade bool renewCerts bool - imagePullTimeout time.Duration kustomizeDir string } @@ -60,10 +53,9 @@ func (f *applyFlags) sessionIsInteractive() bool { // NewCmdApply returns the cobra command for `kubeadm upgrade apply` func NewCmdApply(apf *applyPlanFlags) *cobra.Command { flags := &applyFlags{ - applyPlanFlags: apf, - imagePullTimeout: defaultImagePullTimeout, - etcdUpgrade: true, - renewCerts: true, + applyPlanFlags: apf, + etcdUpgrade: true, + renewCerts: true, } cmd := &cobra.Command{ @@ -88,7 +80,6 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command { cmd.Flags().BoolVar(&flags.dryRun, options.DryRun, flags.dryRun, "Do not change any state, just output what actions would be performed.") cmd.Flags().BoolVar(&flags.etcdUpgrade, "etcd-upgrade", flags.etcdUpgrade, "Perform the upgrade of etcd.") cmd.Flags().BoolVar(&flags.renewCerts, options.CertificateRenewal, flags.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.") - cmd.Flags().DurationVar(&flags.imagePullTimeout, "image-pull-timeout", flags.imagePullTimeout, "The maximum amount of time to wait for the control plane pods to be downloaded.") options.AddKustomizePodsFlag(cmd.Flags(), &flags.kustomizeDir) return cmd @@ -145,22 +136,7 @@ func runApply(flags *applyFlags, userVersion string) error { } } - // Set the timeout as flags.imagePullTimeout to ensure that Prepuller truly respects 'image-pull-timeout' flag - waiter := getWaiter(flags.dryRun, client, flags.imagePullTimeout) - - // Use a prepuller implementation based on creating DaemonSets - // and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally - klog.V(1).Infoln("[upgrade/apply] creating prepuller") - prepuller := upgrade.NewDaemonSetPrepuller(client, waiter, &cfg.ClusterConfiguration) - componentsToPrepull := constants.ControlPlaneComponents - if cfg.Etcd.External 
== nil && flags.etcdUpgrade { - componentsToPrepull = append(componentsToPrepull, constants.Etcd) - } - if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout, componentsToPrepull); err != nil { - return errors.Wrap(err, "[upgrade/prepull] Failed prepulled the images for the control plane components error") - } - - waiter = getWaiter(flags.dryRun, client, upgrade.UpgradeManifestTimeout) + waiter := getWaiter(flags.dryRun, client, upgrade.UpgradeManifestTimeout) // Now; perform the upgrade procedure klog.V(1).Infoln("[upgrade/apply] performing upgrade") diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index a914de2c4d4..a3372a4e605 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -187,8 +187,6 @@ const ( TLSBootstrapRetryInterval = 5 * time.Second // PullImageRetry specifies how many times ContainerRuntime retries when pulling image failed PullImageRetry = 5 - // PrepullImagesInParallelTimeout specifies how long kubeadm should wait for prepulling images in parallel before timing out - PrepullImagesInParallelTimeout = 10 * time.Second // DefaultControlPlaneTimeout specifies the default control plane (actually API Server) timeout for use by kubeadm DefaultControlPlaneTimeout = 4 * time.Minute diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 620b5f16067..206afc65386 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -8,7 +8,6 @@ go_library( "policy.go", "postupgrade.go", "preflight.go", - "prepull.go", "staticpods.go", "versiongetter.go", ], @@ -76,7 +75,6 @@ go_test( "compute_test.go", "policy_test.go", "postupgrade_test.go", - "prepull_test.go", "staticpods_test.go", ], embed = [":go_default_library"], diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go deleted file mode 100644 index 91dc6c06cf1..00000000000 --- a/cmd/kubeadm/app/phases/upgrade/prepull.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package upgrade - -import ( - "fmt" - "time" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/images" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - utilpointer "k8s.io/utils/pointer" -) - -const ( - prepullPrefix = "upgrade-prepull-" -) - -// Prepuller defines an interface for performing a prepull operation in a create-wait-delete fashion in parallel -type Prepuller interface { - CreateFunc(string) error - WaitFunc(string) - DeleteFunc(string) error -} - -// DaemonSetPrepuller makes sure the control-plane images are available on all control-planes -type DaemonSetPrepuller struct { - client clientset.Interface - cfg *kubeadmapi.ClusterConfiguration - waiter apiclient.Waiter -} - -// NewDaemonSetPrepuller creates a new instance of the DaemonSetPrepuller struct -func NewDaemonSetPrepuller(client clientset.Interface, waiter apiclient.Waiter, cfg *kubeadmapi.ClusterConfiguration) *DaemonSetPrepuller { - return &DaemonSetPrepuller{ - client: client, - cfg: cfg, - waiter: waiter, - } -} - -// CreateFunc creates a DaemonSet for making the image available on every relevant node -func (d *DaemonSetPrepuller) CreateFunc(component string) error { - var image string - if component == constants.Etcd { - image = images.GetEtcdImage(d.cfg) - } else { - image = images.GetKubernetesImage(component, d.cfg) - } - pauseImage := images.GetPauseImage(d.cfg) - ds := buildPrePullDaemonSet(component, image, pauseImage) - - // Create the DaemonSet in the API Server - if err := apiclient.CreateOrUpdateDaemonSet(d.client, ds); err != nil { - return errors.Wrapf(err, "unable to create a DaemonSet for prepulling the component %q", component) - } - return nil -} - -// WaitFunc waits for all Pods in the specified DaemonSet to be in the Running state -func (d *DaemonSetPrepuller) WaitFunc(component string) { - fmt.Printf("[upgrade/prepull] Prepulling image for component %s.\n", component) - d.waiter.WaitForPodsWithLabel("k8s-app=upgrade-prepull-" + component) -} - -// DeleteFunc deletes the DaemonSet used for making the image available on every relevant node -func (d *DaemonSetPrepuller) DeleteFunc(component string) error { - dsName := addPrepullPrefix(component) - // TODO: The IsNotFound() check is required in cases where the DaemonSet is missing. 
- // Investigate why this happens: https://github.com/kubernetes/kubeadm/issues/1700 - if err := apiclient.DeleteDaemonSetForeground(d.client, metav1.NamespaceSystem, dsName); err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "unable to cleanup the DaemonSet used for prepulling %s", component) - } - fmt.Printf("[upgrade/prepull] Prepulled image for component %s.\n", component) - return nil -} - -// PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull -func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration, componentsToPrepull []string) error { - fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull) - - timeoutChan := time.After(timeout) - - // Synchronously create the DaemonSets - for _, component := range componentsToPrepull { - if err := kubePrepuller.CreateFunc(component); err != nil { - return err - } - } - - // Create a channel for streaming data from goroutines that run in parallel to a blocking for loop that cleans up - prePulledChan := make(chan string, len(componentsToPrepull)) - for _, component := range componentsToPrepull { - go func(c string) { - // Wait as long as needed. This WaitFunc call should be blocking until completion - kubePrepuller.WaitFunc(c) - // When the task is done, go ahead and cleanup by sending the name to the channel - prePulledChan <- c - }(component) - } - - // This call blocks until all expected messages are received from the channel or errors out if timeoutChan fires. - // For every successful wait, kubePrepuller.DeleteFunc is executed - if err := waitForItemsFromChan(timeoutChan, prePulledChan, len(componentsToPrepull), kubePrepuller.DeleteFunc); err != nil { - return err - } - - fmt.Println("[upgrade/prepull] Successfully prepulled the images for all the control plane components") - return nil -} - -// waitForItemsFromChan waits for n elements from stringChan with a timeout. For every item received from stringChan, cleanupFunc is executed -func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string, n int, cleanupFunc func(string) error) error { - i := 0 - for { - select { - case <-timeoutChan: - return errors.New("the prepull operation timed out") - case result := <-stringChan: - i++ - // If the cleanup function errors; error here as well - if err := cleanupFunc(result); err != nil { - return err - } - if i == n { - return nil - } - } - } -} - -// addPrepullPrefix adds the prepull prefix for this functionality; can be used in names, labels, etc. -func addPrepullPrefix(component string) string { - return fmt.Sprintf("%s%s", prepullPrefix, component) -} - -// buildPrePullDaemonSet builds the DaemonSet that ensures the control plane image is available -func buildPrePullDaemonSet(component, image, pauseImage string) *apps.DaemonSet { - return &apps.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: addPrepullPrefix(component), - Namespace: metav1.NamespaceSystem, - }, - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "k8s-app": addPrepullPrefix(component), - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "k8s-app": addPrepullPrefix(component), - }, - }, - Spec: v1.PodSpec{ - // Use an init container to prepull the target component image. - // Once the prepull completes, the "component --version" command is executed - // to get an exit code of 0. 
- // After the init container completes a regular container with "pause" - // will start to get this Pod in Running state with a blocking container process. - // Note that DaemonSet Pods can only use RestartPolicy of Always, so there has - // to be a blocking process to achieve the Running state. - InitContainers: []v1.Container{ - { - Name: component, - Image: image, - Command: []string{component, "--version"}, - }, - }, - Containers: []v1.Container{ - { - Name: "pause", - Image: pauseImage, - Command: []string{"/pause"}, - }, - }, - NodeSelector: map[string]string{ - constants.LabelNodeRoleMaster: "", - }, - Tolerations: []v1.Toleration{constants.ControlPlaneToleration}, - TerminationGracePeriodSeconds: utilpointer.Int64Ptr(0), - // Explicitly add a PodSecurityContext to allow these Pods to run as non-root. - // This prevents restrictive PSPs from blocking the Pod creation. - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: utilpointer.Int64Ptr(999), - }, - }, - }, - }, - } -} diff --git a/cmd/kubeadm/app/phases/upgrade/prepull_test.go b/cmd/kubeadm/app/phases/upgrade/prepull_test.go deleted file mode 100644 index b7d574c81f2..00000000000 --- a/cmd/kubeadm/app/phases/upgrade/prepull_test.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package upgrade - -import ( - "testing" - "time" - - "github.com/pkg/errors" - - "k8s.io/kubernetes/cmd/kubeadm/app/constants" - //"k8s.io/apimachinery/pkg/util/version" -) - -// failedCreatePrepuller is a fake prepuller that errors for kube-controller-manager in the CreateFunc call -type failedCreatePrepuller struct{} - -func NewFailedCreatePrepuller() Prepuller { - return &failedCreatePrepuller{} -} - -func (p *failedCreatePrepuller) CreateFunc(component string) error { - if component == "kube-controller-manager" { - return errors.New("boo") - } - return nil -} - -func (p *failedCreatePrepuller) WaitFunc(component string) {} - -func (p *failedCreatePrepuller) DeleteFunc(component string) error { - return nil -} - -// foreverWaitPrepuller is a fake prepuller that basically waits "forever" (10 mins, but longer than the 10sec timeout) -type foreverWaitPrepuller struct{} - -func NewForeverWaitPrepuller() Prepuller { - return &foreverWaitPrepuller{} -} - -func (p *foreverWaitPrepuller) CreateFunc(component string) error { - return nil -} - -func (p *foreverWaitPrepuller) WaitFunc(component string) { - time.Sleep(10 * time.Minute) -} - -func (p *foreverWaitPrepuller) DeleteFunc(component string) error { - return nil -} - -// failedDeletePrepuller is a fake prepuller that errors for kube-scheduler in the DeleteFunc call -type failedDeletePrepuller struct{} - -func NewFailedDeletePrepuller() Prepuller { - return &failedDeletePrepuller{} -} - -func (p *failedDeletePrepuller) CreateFunc(component string) error { - return nil -} - -func (p *failedDeletePrepuller) WaitFunc(component string) {} - -func (p *failedDeletePrepuller) DeleteFunc(component string) error { - if component == "kube-scheduler" { - return errors.New("boo") - } - return nil -} - -// goodPrepuller is a fake prepuller that works as expected -type goodPrepuller struct{} - -func NewGoodPrepuller() Prepuller { - return &goodPrepuller{} -} - -func (p *goodPrepuller) CreateFunc(component string) error { - time.Sleep(300 * time.Millisecond) - return nil -} - -func (p *goodPrepuller) WaitFunc(component string) { - time.Sleep(300 * time.Millisecond) -} - -func (p *goodPrepuller) DeleteFunc(component string) error { - time.Sleep(300 * time.Millisecond) - return nil -} - -func TestPrepullImagesInParallel(t *testing.T) { - tests := []struct { - name string - p Prepuller - timeout time.Duration - expectedErr bool - }{ - { - name: "should error out; create failed", - p: NewFailedCreatePrepuller(), - timeout: constants.PrepullImagesInParallelTimeout, - expectedErr: true, - }, - { - name: "should error out; timeout exceeded", - p: NewForeverWaitPrepuller(), - timeout: constants.PrepullImagesInParallelTimeout, - expectedErr: true, - }, - { - name: "should error out; delete failed", - p: NewFailedDeletePrepuller(), - timeout: constants.PrepullImagesInParallelTimeout, - expectedErr: true, - }, - { - name: "should work just fine", - p: NewGoodPrepuller(), - timeout: constants.PrepullImagesInParallelTimeout, - expectedErr: false, - }, - } - - for _, rt := range tests { - t.Run(rt.name, func(t *testing.T) { - actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.ControlPlaneComponents, constants.Etcd)) - if (actualErr != nil) != rt.expectedErr { - t.Errorf( - "failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t", - rt.expectedErr, - (actualErr != nil), - ) - } - }) - } -} From a1693052070e588401ca620b17bb206112543cfe Mon Sep 17 00:00:00 2001 From: Xianglin Gao Date: Thu, 7 May 2020 17:31:01 +0800 Subject: [PATCH 
2/2] kubeadm: add pull images check in upgrade apply and upgrade node Signed-off-by: Xianglin Gao --- cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD | 4 + .../app/cmd/phases/upgrade/node/data.go | 2 + .../app/cmd/phases/upgrade/node/preflight.go | 73 +++++++++++++++++++ cmd/kubeadm/app/cmd/upgrade/BUILD | 1 + cmd/kubeadm/app/cmd/upgrade/apply.go | 30 +++++++- cmd/kubeadm/app/cmd/upgrade/node.go | 63 ++++++++++------ 6 files changed, 148 insertions(+), 25 deletions(-) create mode 100644 cmd/kubeadm/app/cmd/phases/upgrade/node/preflight.go diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD b/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD index dfd82184355..f5d74dcac4a 100644 --- a/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/BUILD @@ -6,6 +6,7 @@ go_library( "controlplane.go", "data.go", "kubeletconfig.go", + "preflight.go", ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/upgrade/node", visibility = ["//visibility:public"], @@ -17,11 +18,14 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/kubelet:go_default_library", "//cmd/kubeadm/app/phases/upgrade:go_default_library", + "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/dryrun:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/data.go b/cmd/kubeadm/app/cmd/phases/upgrade/node/data.go index 6e2c33604ff..27878ad4308 100644 --- a/cmd/kubeadm/app/cmd/phases/upgrade/node/data.go +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/data.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) @@ -31,5 +32,6 @@ type Data interface { Cfg() *kubeadmapi.InitConfiguration IsControlPlaneNode() bool Client() clientset.Interface + IgnorePreflightErrors() sets.String KustomizeDir() string } diff --git a/cmd/kubeadm/app/cmd/phases/upgrade/node/preflight.go b/cmd/kubeadm/app/cmd/phases/upgrade/node/preflight.go new file mode 100644 index 00000000000..4bcfc5aa58a --- /dev/null +++ b/cmd/kubeadm/app/cmd/phases/upgrade/node/preflight.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package node + +import ( + "fmt" + + "github.com/pkg/errors" + "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" + "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" + "k8s.io/kubernetes/cmd/kubeadm/app/preflight" + utilsexec "k8s.io/utils/exec" +) + +// NewPreflightPhase creates a kubeadm workflow phase that implements preflight checks for a new node join +func NewPreflightPhase() workflow.Phase { + return workflow.Phase{ + Name: "preflight", + Short: "Run upgrade node pre-flight checks", + Long: "Run pre-flight checks for kubeadm upgrade node.", + Run: runPreflight, + InheritFlags: []string{ + options.IgnorePreflightErrors, + }, + } +} + +// runPreflight executes preflight checks logic. +func runPreflight(c workflow.RunData) error { + data, ok := c.(Data) + if !ok { + return errors.New("preflight phase invoked with an invalid data struct") + } + fmt.Println("[preflight] Running pre-flight checks") + + // First, check if we're root separately from the other preflight checks and fail fast + if err := preflight.RunRootCheckOnly(data.IgnorePreflightErrors()); err != nil { + return err + } + + // if this is a control-plane node, pull the basic images + if data.IsControlPlaneNode() { + if !data.DryRun() { + fmt.Println("[preflight] Pulling images required for setting up a Kubernetes cluster") + fmt.Println("[preflight] This might take a minute or two, depending on the speed of your internet connection") + fmt.Println("[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'") + if err := preflight.RunPullImagesCheck(utilsexec.New(), data.Cfg(), data.IgnorePreflightErrors()); err != nil { + return err + } + } else { + fmt.Println("[preflight] Would pull the required images (like 'kubeadm config images pull')") + } + } else { + fmt.Println("[preflight] Skipping prepull. 
Not a control plane node.") + return nil + } + + return nil +} diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index a8ea89fc999..a54783b6427 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -42,6 +42,7 @@ go_library( "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index b9ab8bc5fe4..f2fc89b7ccb 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -18,9 +18,11 @@ package upgrade import ( "fmt" + "time" "github.com/pkg/errors" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" @@ -28,9 +30,15 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" + "k8s.io/kubernetes/cmd/kubeadm/app/preflight" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" + utilsexec "k8s.io/utils/exec" +) + +const ( + defaultImagePullTimeout = 15 * time.Minute ) // applyFlags holds the information about the flags that can be passed to apply @@ -42,6 +50,7 @@ type applyFlags struct { dryRun bool etcdUpgrade bool renewCerts bool + imagePullTimeout time.Duration kustomizeDir string } @@ -53,9 +62,10 @@ func (f *applyFlags) sessionIsInteractive() bool { // NewCmdApply returns the cobra command for `kubeadm upgrade apply` func NewCmdApply(apf *applyPlanFlags) *cobra.Command { flags := &applyFlags{ - applyPlanFlags: apf, - etcdUpgrade: true, - renewCerts: true, + applyPlanFlags: apf, + imagePullTimeout: defaultImagePullTimeout, + etcdUpgrade: true, + renewCerts: true, } cmd := &cobra.Command{ @@ -80,6 +90,9 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command { cmd.Flags().BoolVar(&flags.dryRun, options.DryRun, flags.dryRun, "Do not change any state, just output what actions would be performed.") cmd.Flags().BoolVar(&flags.etcdUpgrade, "etcd-upgrade", flags.etcdUpgrade, "Perform the upgrade of etcd.") cmd.Flags().BoolVar(&flags.renewCerts, options.CertificateRenewal, flags.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.") + cmd.Flags().DurationVar(&flags.imagePullTimeout, "image-pull-timeout", flags.imagePullTimeout, "The maximum amount of time to wait for the control plane pods to be downloaded.") + // TODO: The flag was deprecated in 1.19; remove the flag following a GA deprecation policy of 12 months or 2 releases (whichever is longer) + cmd.Flags().MarkDeprecated("image-pull-timeout", "This flag is deprecated and will be removed in a future version.") options.AddKustomizePodsFlag(cmd.Flags(), &flags.kustomizeDir) return cmd @@ -136,6 +149,17 @@ func runApply(flags *applyFlags, userVersion string) error { } } + if !flags.dryRun { + fmt.Println("[upgrade/prepull] Pulling images required for setting up a Kubernetes cluster") + fmt.Println("[upgrade/prepull] This might take a minute or two, depending on the speed of your internet connection") + fmt.Println("[upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull'") + if err := 
preflight.RunPullImagesCheck(utilsexec.New(), cfg, sets.NewString(cfg.NodeRegistration.IgnorePreflightErrors...)); err != nil { + return err + } + } else { + fmt.Println("[upgrade/prepull] Would pull the required images (like 'kubeadm config images pull')") + } + waiter := getWaiter(flags.dryRun, client, upgrade.UpgradeManifestTimeout) // Now; perform the upgrade procedure diff --git a/cmd/kubeadm/app/cmd/upgrade/node.go b/cmd/kubeadm/app/cmd/upgrade/node.go index db53d184f4c..ee404130ad9 100644 --- a/cmd/kubeadm/app/cmd/upgrade/node.go +++ b/cmd/kubeadm/app/cmd/upgrade/node.go @@ -23,8 +23,10 @@ import ( "github.com/spf13/cobra" flag "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" phases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/upgrade/node" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" @@ -36,12 +38,13 @@ import ( // Please note that this structure includes the public kubeadm config API, but only a subset of the options // supported by this api will be exposed as a flag. type nodeOptions struct { - kubeConfigPath string - kubeletVersion string - etcdUpgrade bool - renewCerts bool - dryRun bool - kustomizeDir string + kubeConfigPath string + kubeletVersion string + etcdUpgrade bool + renewCerts bool + dryRun bool + kustomizeDir string + ignorePreflightErrors []string } // compile-time assert that the local data object satisfies the phases data interface. @@ -50,14 +53,15 @@ var _ phases.Data = &nodeData{} // nodeData defines all the runtime information used when running the kubeadm upgrade node worklow; // this data is shared across all the phases that are included in the workflow. type nodeData struct { - etcdUpgrade bool - renewCerts bool - dryRun bool - kubeletVersion string - cfg *kubeadmapi.InitConfiguration - isControlPlaneNode bool - client clientset.Interface - kustomizeDir string + etcdUpgrade bool + renewCerts bool + dryRun bool + kubeletVersion string + cfg *kubeadmapi.InitConfiguration + isControlPlaneNode bool + client clientset.Interface + kustomizeDir string + ignorePreflightErrors sets.String } // NewCmdNode returns the cobra command for `kubeadm upgrade node` @@ -80,6 +84,7 @@ func NewCmdNode() *cobra.Command { options.AddKustomizePodsFlag(cmd.Flags(), &nodeOptions.kustomizeDir) // initialize the workflow runner with the list of phases + nodeRunner.AppendPhase(phases.NewPreflightPhase()) nodeRunner.AppendPhase(phases.NewControlPlane()) nodeRunner.AppendPhase(phases.NewKubeletConfigPhase()) @@ -113,6 +118,7 @@ func addUpgradeNodeFlags(flagSet *flag.FlagSet, nodeOptions *nodeOptions) { flagSet.MarkDeprecated(options.KubeletVersion, "This flag is deprecated and will be removed in a future version.") flagSet.BoolVar(&nodeOptions.renewCerts, options.CertificateRenewal, nodeOptions.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.") flagSet.BoolVar(&nodeOptions.etcdUpgrade, options.EtcdUpgrade, nodeOptions.etcdUpgrade, "Perform the upgrade of etcd.") + flagSet.StringSliceVar(&nodeOptions.ignorePreflightErrors, options.IgnorePreflightErrors, nodeOptions.ignorePreflightErrors, "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. 
Value 'all' ignores errors from all checks.") } // newNodeData returns a new nodeData struct to be used for the execution of the kubeadm upgrade node workflow. @@ -140,15 +146,23 @@ func newNodeData(cmd *cobra.Command, args []string, options *nodeOptions) (*node return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap") } + ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors) + if err != nil { + return nil, err + } + // Also set the union of pre-flight errors to JoinConfiguration, to provide a consistent view of the runtime configuration: + cfg.NodeRegistration.IgnorePreflightErrors = ignorePreflightErrorsSet.List() + return &nodeData{ - etcdUpgrade: options.etcdUpgrade, - renewCerts: options.renewCerts, - dryRun: options.dryRun, - kubeletVersion: options.kubeletVersion, - cfg: cfg, - client: client, - isControlPlaneNode: isControlPlaneNode, - kustomizeDir: options.kustomizeDir, + etcdUpgrade: options.etcdUpgrade, + renewCerts: options.renewCerts, + dryRun: options.dryRun, + kubeletVersion: options.kubeletVersion, + cfg: cfg, + client: client, + isControlPlaneNode: isControlPlaneNode, + kustomizeDir: options.kustomizeDir, + ignorePreflightErrors: ignorePreflightErrorsSet, }, nil } @@ -191,3 +205,8 @@ func (d *nodeData) Client() clientset.Interface { func (d *nodeData) KustomizeDir() string { return d.kustomizeDir } + +// IgnorePreflightErrors returns the list of preflight errors to ignore. +func (d *nodeData) IgnorePreflightErrors() sets.String { + return d.ignorePreflightErrors +}
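
A minimal, self-contained sketch of the new prepull path introduced by these two patches (not part of either patch). It uses only calls that appear in the diffs above — preflight.RunPullImagesCheck, utilsexec.New, sets.NewString — while the wrapper function, the package/main scaffolding, and the assumption that cfg is an already-loaded InitConfiguration are illustrative. Unlike the deleted DaemonSet prepuller, which cached images on every control-plane node in parallel, this check pulls images only on the node running the command, which is why the second patch also adds a preflight phase to `kubeadm upgrade node` for control-plane nodes.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
	utilsexec "k8s.io/utils/exec"
)

// prepullControlPlaneImages mirrors the call added to `kubeadm upgrade apply`:
// instead of creating a prepull DaemonSet and waiting for its Pods, the control
// plane images are pulled directly on the local node via the preflight
// ImagePull check, honouring the configured --ignore-preflight-errors values.
func prepullControlPlaneImages(cfg *kubeadmapi.InitConfiguration) error {
	fmt.Println("[upgrade/prepull] Pulling images required for setting up a Kubernetes cluster")
	ignore := sets.NewString(cfg.NodeRegistration.IgnorePreflightErrors...)
	return preflight.RunPullImagesCheck(utilsexec.New(), cfg, ignore)
}

func main() {
	// In kubeadm, cfg comes from the kubeadm-config ConfigMap or a user-supplied file;
	// a zero-value configuration is used here only so the sketch compiles and exercises
	// the call path (the pull itself is expected to fail against an empty config).
	cfg := &kubeadmapi.InitConfiguration{}
	if err := prepullControlPlaneImages(cfg); err != nil {
		fmt.Printf("[upgrade/prepull] image pull failed: %v\n", err)
	}
}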