[kubeadm/app/util] switch to github.com/pkg/errors
Signed-off-by: yuexiao-wang <wang.yuexiao@zte.com.cn>
Parent: 5336866a8c
Commit: f15410692e
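The change applied throughout the diff below is mechanical: fmt.Errorf calls that wrap an underlying error become errors.Wrap(err, "message"), and fmt.Errorf calls with a constant message become errors.New("message"). As a minimal illustrative sketch (not code from this commit; the helper name loadManifest and its messages are made up), the before/after pattern looks like this:

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/pkg/errors"
)

// loadManifest is a hypothetical helper showing the conversion style used in this commit.
func loadManifest(path string) ([]byte, error) {
    if path == "" {
        // Before: return nil, fmt.Errorf("manifest path must not be empty")
        return nil, errors.New("manifest path must not be empty")
    }
    data, err := ioutil.ReadFile(path)
    if err != nil {
        // Before: return nil, fmt.Errorf("failed to read manifest: %v", err)
        // errors.Wrap keeps err as the cause (recoverable via errors.Cause)
        // and records a stack trace at the wrap site.
        return nil, errors.Wrap(err, "failed to read manifest")
    }
    return data, nil
}

func main() {
    if _, err := loadManifest("/nonexistent"); err != nil {
        fmt.Printf("%v\n", err)                      // "failed to read manifest: <underlying error>"
        fmt.Printf("cause: %v\n", errors.Cause(err)) // the original *os.PathError
    }
}

Callers that only read the formatted message keep working, since Wrap's Error() output is still "message: cause".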
@@ -49,6 +49,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/util/cert:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
     ],
 )
@@ -21,6 +21,8 @@ import (
     "net/http"
     "os"

+    "github.com/pkg/errors"
+
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -116,11 +118,11 @@ func masterNodesReady(client clientset.Interface) error {
         LabelSelector: selector.String(),
     })
     if err != nil {
-        return fmt.Errorf("couldn't list masters in cluster: %v", err)
+        return errors.Wrap(err, "couldn't list masters in cluster")
     }

     if len(masters.Items) == 0 {
-        return fmt.Errorf("failed to find any nodes with master role")
+        return errors.New("failed to find any nodes with master role")
     }

     notReadyMasters := getNotReadyNodes(masters.Items)
@@ -192,10 +194,10 @@ func daemonSetHealth(dsStatus *apps.DaemonSetStatus) error {
         return fmt.Errorf("current number of scheduled Pods ('%d') doesn't match the amount of desired Pods ('%d')", dsStatus.CurrentNumberScheduled, dsStatus.DesiredNumberScheduled)
     }
     if dsStatus.NumberAvailable == 0 {
-        return fmt.Errorf("no available Pods for DaemonSet")
+        return errors.New("no available Pods for DaemonSet")
     }
     if dsStatus.NumberReady == 0 {
-        return fmt.Errorf("no ready Pods for DaemonSet")
+        return errors.New("no ready Pods for DaemonSet")
     }
     return nil
 }
@@ -22,6 +22,8 @@ import (
     "strings"
     "time"

+    "github.com/pkg/errors"
+
     "k8s.io/apimachinery/pkg/util/version"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -253,30 +255,30 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP
 func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.InitConfiguration, recoverManifests map[string]string, isTLSUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) {
     // Add etcd static pod spec only if external etcd is not configured
     if cfg.Etcd.External != nil {
-        return false, fmt.Errorf("external etcd detected, won't try to change any etcd state")
+        return false, errors.New("external etcd detected, won't try to change any etcd state")
     }

     // Checking health state of etcd before proceeding with the upgrade
     _, err := oldEtcdClient.GetClusterStatus()
     if err != nil {
-        return true, fmt.Errorf("etcd cluster is not healthy: %v", err)
+        return true, errors.Wrap(err, "etcd cluster is not healthy")
     }

     // Backing up etcd data store
     backupEtcdDir := pathMgr.BackupEtcdDir()
     runningEtcdDir := cfg.Etcd.Local.DataDir
     if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil {
-        return true, fmt.Errorf("failed to back up etcd data: %v", err)
+        return true, errors.Wrap(err, "failed to back up etcd data")
     }

     // Need to check currently used version and version from constants, if differs then upgrade
     desiredEtcdVersion, err := constants.EtcdSupportedVersion(cfg.KubernetesVersion)
     if err != nil {
-        return true, fmt.Errorf("failed to retrieve an etcd version for the target kubernetes version: %v", err)
+        return true, errors.Wrap(err, "failed to retrieve an etcd version for the target kubernetes version")
     }
     currentEtcdVersionStr, err := oldEtcdClient.GetVersion()
     if err != nil {
-        return true, fmt.Errorf("failed to retrieve the current etcd version: %v", err)
+        return true, errors.Wrap(err, "failed to retrieve the current etcd version")
     }
     currentEtcdVersion, err := version.ParseSemantic(currentEtcdVersionStr)
     if err != nil {
@@ -294,13 +296,13 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM

     beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeRegistration.Name, constants.Etcd)
     if err != nil {
-        return true, fmt.Errorf("failed to get etcd pod's hash: %v", err)
+        return true, errors.Wrap(err, "failed to get etcd pod's hash")
     }

     // Write the updated etcd static Pod manifest into the temporary directory, at this point no etcd change
     // has occurred in any aspects.
     if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg); err != nil {
-        return true, fmt.Errorf("error creating local etcd static pod manifest file: %v", err)
+        return true, errors.Wrap(err, "error creating local etcd static pod manifest file")
     }

     // Waiter configurations for checking etcd status
@@ -346,7 +348,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
         fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")

         // Since etcd cluster came back up with the old manifest
-        return true, fmt.Errorf("fatal error when trying to upgrade the etcd cluster: %v, rolled the state back to pre-upgrade state", err)
+        return true, errors.Wrap(err, "fatal error when trying to upgrade the etcd cluster, rolled the state back to pre-upgrade state")
     }

     // Initialize the new etcd client if it wasn't pre-initialized
@@ -357,7 +359,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
             cfg.CertificatesDir,
         )
         if err != nil {
-            return true, fmt.Errorf("fatal error creating etcd client: %v", err)
+            return true, errors.Wrap(err, "fatal error creating etcd client")
         }
         newEtcdClient = client
     }
@@ -390,7 +392,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM
         fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")

         // We've successfully rolled back etcd, and now return an error describing that the upgrade failed
-        return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, rolled the state back to pre-upgrade state", err)
+        return true, errors.Wrap(err, "fatal error upgrading local etcd cluster, rolled the state back to pre-upgrade state")
     }

     return false, nil
@@ -418,7 +420,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
             cfg.Etcd.External.KeyFile,
         )
         if err != nil {
-            return fmt.Errorf("failed to create etcd client for external etcd: %v", err)
+            return errors.Wrap(err, "failed to create etcd client for external etcd")
         }
         oldEtcdClient = client
         // Since etcd is managed externally, the new etcd client will be the same as the old client
@@ -433,7 +435,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
             cfg.CertificatesDir,
         )
         if err != nil {
-            return fmt.Errorf("failed to create etcd client: %v", err)
+            return errors.Wrap(err, "failed to create etcd client")
         }
         oldEtcdClient = client
     }
@@ -463,7 +465,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager
     fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir())
     err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
     if err != nil {
-        return fmt.Errorf("error creating init static pod manifest files: %v", err)
+        return errors.Wrap(err, "error creating init static pod manifest files")
     }

     for _, component := range constants.MasterComponents {
@@ -497,7 +499,7 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr
         }
     }
     // Let the user know there were problems, but we tried to recover
-    return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs)
+    return errors.New("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced")
 }

 // rollbackEtcdData rolls back the content of etcd folder if something went wrong.
@@ -20,6 +20,8 @@ import (
     "fmt"
     "io"

+    "github.com/pkg/errors"
+
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     versionutil "k8s.io/apimachinery/pkg/util/version"
@@ -59,13 +61,13 @@ func NewKubeVersionGetter(client clientset.Interface, writer io.Writer) VersionG
 func (g *KubeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
     clusterVersionInfo, err := g.client.Discovery().ServerVersion()
     if err != nil {
-        return "", nil, fmt.Errorf("Couldn't fetch cluster version from the API Server: %v", err)
+        return "", nil, errors.Wrap(err, "Couldn't fetch cluster version from the API Server")
     }
     fmt.Fprintf(g.w, "[upgrade/versions] Cluster version: %s\n", clusterVersionInfo.String())

     clusterVersion, err := versionutil.ParseSemantic(clusterVersionInfo.String())
     if err != nil {
-        return "", nil, fmt.Errorf("Couldn't parse cluster version: %v", err)
+        return "", nil, errors.Wrap(err, "Couldn't parse cluster version")
     }
     return clusterVersionInfo.String(), clusterVersion, nil
 }
@@ -77,7 +79,7 @@ func (g *KubeVersionGetter) KubeadmVersion() (string, *versionutil.Version, erro

     kubeadmVersion, err := versionutil.ParseSemantic(kubeadmVersionInfo.String())
     if err != nil {
-        return "", nil, fmt.Errorf("Couldn't parse kubeadm version: %v", err)
+        return "", nil, errors.Wrap(err, "Couldn't parse kubeadm version")
     }
     return kubeadmVersionInfo.String(), kubeadmVersion, nil
 }
@@ -104,7 +106,7 @@ func (g *KubeVersionGetter) VersionFromCILabel(ciVersionLabel, description strin
 func (g *KubeVersionGetter) KubeletVersions() (map[string]uint16, error) {
     nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
     if err != nil {
-        return nil, fmt.Errorf("couldn't list all nodes in cluster")
+        return nil, errors.New("couldn't list all nodes in cluster")
     }
     return computeKubeletVersions(nodes.Items), nil
 }
@@ -36,6 +36,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/github.com/ghodss/yaml:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
@@ -41,6 +41,7 @@ go_library(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
     ],
 )
@@ -29,6 +29,8 @@ import (
     "k8s.io/client-go/rest"
     core "k8s.io/client-go/testing"
     "k8s.io/client-go/tools/clientcmd"
+
+    "github.com/pkg/errors"
 )

 // ClientBackedDryRunGetter implements the DryRunGetter interface for use in NewDryRunClient() and proxies all GET and LIST requests to the backing API server reachable via rest.Config
@@ -61,11 +63,11 @@ func NewClientBackedDryRunGetter(config *rest.Config) (*ClientBackedDryRunGetter
 func NewClientBackedDryRunGetterFromKubeconfig(file string) (*ClientBackedDryRunGetter, error) {
     config, err := clientcmd.LoadFromFile(file)
     if err != nil {
-        return nil, fmt.Errorf("failed to load kubeconfig: %v", err)
+        return nil, errors.Wrap(err, "failed to load kubeconfig")
     }
     clientConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
     if err != nil {
-        return nil, fmt.Errorf("failed to create API client configuration from kubeconfig: %v", err)
+        return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig")
     }
     return NewClientBackedDryRunGetter(clientConfig)
 }
@@ -23,6 +23,7 @@ import (
     "io"
     "strings"

+    "github.com/pkg/errors"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
     clientset "k8s.io/client-go/kubernetes"
@@ -114,7 +115,7 @@ func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface {
         getAction, ok := action.(core.GetAction)
         if !ok {
             // something's wrong, we can't handle this event
-            return true, nil, fmt.Errorf("can't cast get reactor event action object to GetAction interface")
+            return true, nil, errors.New("can't cast get reactor event action object to GetAction interface")
         }
         handled, obj, err := opts.Getter.HandleGetAction(getAction)

@@ -139,7 +140,7 @@ func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface {
         listAction, ok := action.(core.ListAction)
         if !ok {
             // something's wrong, we can't handle this event
-            return true, nil, fmt.Errorf("can't cast list reactor event action object to ListAction interface")
+            return true, nil, errors.New("can't cast list reactor event action object to ListAction interface")
         }
         handled, objs, err := opts.Getter.HandleListAction(listAction)

@@ -20,6 +20,7 @@ import (
     "encoding/json"
     "fmt"

+    "github.com/pkg/errors"
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     rbac "k8s.io/api/rbac/v1"
@@ -40,11 +41,11 @@ import (
 func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error {
     if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create configmap: %v", err)
+            return errors.Wrap(err, "unable to create configmap")
         }

         if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(cm); err != nil {
-            return fmt.Errorf("unable to update configmap: %v", err)
+            return errors.Wrap(err, "unable to update configmap")
         }
     }
     return nil
@@ -58,7 +59,7 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi
         }
         if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil {
             if !apierrors.IsAlreadyExists(err) {
-                return fmt.Errorf("unable to create configmap: %v", err)
+                return errors.Wrap(err, "unable to create configmap")
             }
         }
     }
@@ -69,11 +70,11 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi
 func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {
     if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(secret); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create secret: %v", err)
+            return errors.Wrap(err, "unable to create secret")
         }

         if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(secret); err != nil {
-            return fmt.Errorf("unable to update secret: %v", err)
+            return errors.Wrap(err, "unable to update secret")
         }
     }
     return nil
@@ -85,7 +86,7 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco
         // Note: We don't run .Update here afterwards as that's probably not required
         // Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create serviceaccount: %v", err)
+            return errors.Wrap(err, "unable to create serviceaccount")
         }
     }
     return nil
@@ -95,11 +96,11 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco
 func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error {
     if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create deployment: %v", err)
+            return errors.Wrap(err, "unable to create deployment")
         }

         if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil {
-            return fmt.Errorf("unable to update deployment: %v", err)
+            return errors.Wrap(err, "unable to update deployment")
         }
     }
     return nil
@@ -109,11 +110,11 @@ func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deploymen
 func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error {
     if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(ds); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create daemonset: %v", err)
+            return errors.Wrap(err, "unable to create daemonset")
         }

         if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(ds); err != nil {
-            return fmt.Errorf("unable to update daemonset: %v", err)
+            return errors.Wrap(err, "unable to update daemonset")
         }
     }
     return nil
@@ -141,11 +142,11 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri
 func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
     if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create RBAC role: %v", err)
+            return errors.Wrap(err, "unable to create RBAC role")
         }

         if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(role); err != nil {
-            return fmt.Errorf("unable to update RBAC role: %v", err)
+            return errors.Wrap(err, "unable to update RBAC role")
         }
     }
     return nil
@@ -155,11 +156,11 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
 func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error {
     if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(roleBinding); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create RBAC rolebinding: %v", err)
+            return errors.Wrap(err, "unable to create RBAC rolebinding")
         }

         if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(roleBinding); err != nil {
-            return fmt.Errorf("unable to update RBAC rolebinding: %v", err)
+            return errors.Wrap(err, "unable to update RBAC rolebinding")
         }
     }
     return nil
@@ -169,11 +170,11 @@ func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.Rol
 func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {
     if _, err := client.RbacV1().ClusterRoles().Create(clusterRole); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create RBAC clusterrole: %v", err)
+            return errors.Wrap(err, "unable to create RBAC clusterrole")
         }

         if _, err := client.RbacV1().ClusterRoles().Update(clusterRole); err != nil {
-            return fmt.Errorf("unable to update RBAC clusterrole: %v", err)
+            return errors.Wrap(err, "unable to update RBAC clusterrole")
         }
     }
     return nil
@@ -183,11 +184,11 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.Clu
 func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {
     if _, err := client.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {
         if !apierrors.IsAlreadyExists(err) {
-            return fmt.Errorf("unable to create RBAC clusterrolebinding: %v", err)
+            return errors.Wrap(err, "unable to create RBAC clusterrolebinding")
        }

         if _, err := client.RbacV1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil {
-            return fmt.Errorf("unable to update RBAC clusterrolebinding: %v", err)
+            return errors.Wrap(err, "unable to update RBAC clusterrolebinding")
         }
     }
     return nil
@@ -22,6 +22,7 @@ import (
     "net/http"
     "time"

+    "github.com/pkg/errors"
     "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -143,7 +144,7 @@ func (w *KubeWaiter) WaitForHealthyKubelet(initalTimeout time.Duration, healthzE
         if resp.StatusCode != http.StatusOK {
             fmt.Printf("[kubelet-check] It seems like the kubelet isn't running or healthy.")
             fmt.Printf("[kubelet-check] The HTTP call equal to 'curl -sSL %s' returned HTTP code %d\n", healthzEndpoint, resp.StatusCode)
-            return fmt.Errorf("the kubelet healthz endpoint is unhealthy")
+            return errors.New("the kubelet healthz endpoint is unhealthy")
         }
         return nil
     }, 5) // a failureThreshold of five means waiting for a total of 155 seconds
@@ -20,6 +20,8 @@ import (
     "fmt"
     "sort"
     "strings"
+
+    "github.com/pkg/errors"
 )

 // BuildArgumentListFromMap takes two string-string maps, one with the base arguments and one
@@ -89,10 +91,10 @@ func ReplaceArgument(command []string, argMutateFunc func(map[string]string) map
 // parseArgument parses the argument "--foo=bar" to "foo" and "bar"
 func parseArgument(arg string) (string, string, error) {
     if !strings.HasPrefix(arg, "--") {
-        return "", "", fmt.Errorf("the argument should start with '--'")
+        return "", "", errors.New("the argument should start with '--'")
     }
     if !strings.Contains(arg, "=") {
-        return "", "", fmt.Errorf("the argument should have a '=' between the flag and the value")
+        return "", "", errors.New("the argument should have a '=' between the flag and the value")
     }
     // Remove the starting --
     arg = strings.TrimPrefix(arg, "--")
@@ -101,10 +103,10 @@ func parseArgument(arg string) (string, string, error) {

     // Make sure both a key and value is present
     if len(keyvalSlice) != 2 {
-        return "", "", fmt.Errorf("the argument must have both a key and a value")
+        return "", "", errors.New("the argument must have both a key and a value")
     }
     if len(keyvalSlice[0]) == 0 {
-        return "", "", fmt.Errorf("the argument must have a key")
+        return "", "", errors.New("the argument must have a key")
     }

     return keyvalSlice[0], keyvalSlice[1], nil
@@ -20,6 +20,8 @@ import (
     "fmt"
     "strings"

+    "github.com/pkg/errors"
+
     utilsexec "k8s.io/utils/exec"
 )
@@ -51,7 +53,7 @@ func validateCgroupDriver(driver string) error {
 func callDockerInfo(execer utilsexec.Interface) (string, error) {
     out, err := execer.Command("docker", "info").Output()
     if err != nil {
-        return "", fmt.Errorf("cannot execute 'docker info': %v", err)
+        return "", errors.Wrap(err, "cannot execute 'docker info'")
     }
     return string(out), nil
 }
@@ -71,5 +73,5 @@ func getCgroupDriverFromDockerInfo(info string) (string, error) {
         }
         return driver, nil
     }
-    return "", fmt.Errorf("cgroup driver is not defined in 'docker info'")
+    return "", errors.New("cgroup driver is not defined in 'docker info'")
 }
@@ -11,6 +11,7 @@ go_library(
         "//cmd/kubeadm/app/util/staticpod:go_default_library",
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
     ],
 )
@@ -26,6 +26,7 @@ import (

     "github.com/coreos/etcd/clientv3"
     "github.com/coreos/etcd/pkg/transport"
+    "github.com/pkg/errors"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod"
@@ -58,7 +59,7 @@ func PodManifestsHaveTLS(ManifestDir string) (bool, error) {
     etcdPodPath := constants.GetStaticPodFilepath(constants.Etcd, ManifestDir)
     etcdPod, err := staticpod.ReadStaticPodFromDisk(etcdPodPath)
     if err != nil {
-        return false, fmt.Errorf("failed to check if etcd pod implements TLS: %v", err)
+        return false, errors.Wrap(err, "failed to check if etcd pod implements TLS")
     }

     tlsFlags := []string{
@@ -140,7 +141,7 @@ func (c Client) GetVersion() (string, error) {
         clusterVersion = v
     }
     if clusterVersion == "" {
-        return "", fmt.Errorf("could not determine cluster etcd version")
+        return "", errors.New("could not determine cluster etcd version")
     }
     return clusterVersion, nil
 }
@@ -215,7 +216,7 @@ func (c Client) WaitForClusterAvailable(delay time.Duration, retries int, retryI
         }
         return resp, nil
     }
-    return false, fmt.Errorf("timeout waiting for etcd cluster to be available")
+    return false, errors.New("timeout waiting for etcd cluster to be available")
 }

 // CheckConfigurationIsHA returns true if the given InitConfiguration etcd block appears to be an HA configuration.
@@ -20,6 +20,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
     ],
 )
@@ -22,6 +22,8 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/clientcmd"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+    "github.com/pkg/errors"
 )

 // CreateBasic creates a basic, general KubeConfig object that then can be extended
@@ -70,7 +72,7 @@ func CreateWithToken(serverURL, clusterName, userName string, caCert []byte, tok
 func ClientSetFromFile(path string) (*clientset.Clientset, error) {
     config, err := clientcmd.LoadFromFile(path)
     if err != nil {
-        return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
+        return nil, errors.Wrap(err, "failed to load admin kubeconfig")
     }
     return ToClientSet(config)
 }
@@ -79,12 +81,12 @@ func ClientSetFromFile(path string) (*clientset.Clientset, error) {
 func ToClientSet(config *clientcmdapi.Config) (*clientset.Clientset, error) {
     clientConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
     if err != nil {
-        return nil, fmt.Errorf("failed to create API client configuration from kubeconfig: %v", err)
+        return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig")
     }

     client, err := clientset.NewForConfig(clientConfig)
     if err != nil {
-        return nil, fmt.Errorf("failed to create API client: %v", err)
+        return nil, errors.Wrap(err, "failed to create API client")
     }
     return client, nil
 }
@@ -28,6 +28,7 @@ go_library(
         "//vendor/github.com/docker/docker/api/types:go_default_library",
         "//vendor/github.com/docker/docker/client:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
     ],
 )
@@ -21,6 +21,8 @@ import (
     "fmt"
     "os"
     "strings"
+
+    "github.com/pkg/errors"
 )

 var _ Validator = &CgroupsValidator{}
@@ -40,7 +42,7 @@ const (
 func (c *CgroupsValidator) Validate(spec SysSpec) (error, error) {
     subsystems, err := c.getCgroupSubsystems()
     if err != nil {
-        return nil, fmt.Errorf("failed to get cgroup subsystems: %v", err)
+        return nil, errors.Wrap(err, "failed to get cgroup subsystems")
     }
     return nil, c.validateCgroupSubsystems(spec.Cgroups, subsystems)
 }
@@ -23,6 +23,7 @@ import (

     "github.com/docker/docker/api/types"
     "github.com/docker/docker/client"
+    "github.com/pkg/errors"
 )

 var _ Validator = &DockerValidator{}
@@ -51,11 +52,11 @@ func (d *DockerValidator) Validate(spec SysSpec) (error, error) {

     c, err := client.NewClient(dockerEndpoint, "", nil, nil)
     if err != nil {
-        return nil, fmt.Errorf("failed to create docker client: %v", err)
+        return nil, errors.Wrap(err, "failed to create docker client")
     }
     info, err := c.Info(context.Background())
     if err != nil {
-        return nil, fmt.Errorf("failed to get docker info: %v", err)
+        return nil, errors.Wrap(err, "failed to get docker info")
     }
     return d.validateDockerInfo(spec.RuntimeSpec.DockerSpec, info)
 }
@@ -20,6 +20,8 @@ import (
     "fmt"
     "os/exec"
     "strings"
+
+    "github.com/pkg/errors"
 )

 var _ Validator = &OSValidator{}
|
||||
@ -35,7 +37,7 @@ func (o *OSValidator) Name() string {
|
||||
func (o *OSValidator) Validate(spec SysSpec) (error, error) {
|
||||
os, err := exec.Command("uname").CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get os name: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to get os name")
|
||||
}
|
||||
return nil, o.validateOS(strings.TrimSpace(string(os)), spec.OS)
|
||||
}
|
||||
|
@@ -18,8 +18,9 @@ package util

 import (
     "bytes"
-    "fmt"
     "text/template"
+
+    "github.com/pkg/errors"
 )
// ParseTemplate validates and parses passed as argument template
|
||||
@ -27,11 +28,11 @@ func ParseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
tmpl, err := template.New("template").Parse(strtmpl)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error when parsing template: %v", err)
|
||||
return nil, errors.Wrap(err, "error when parsing template")
|
||||
}
|
||||
err = tmpl.Execute(&buf, obj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error when executing template: %v", err)
|
||||
return nil, errors.Wrap(err, "error when executing template")
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|