Merge pull request #72517 from fabriziopandini/error-imports-cleanups

kubeadm: imports cleanup
Kubernetes Prow Robot authored 2019-01-03 05:48:54 -08:00, committed by GitHub
commit 716b253963
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
27 changed files with 98 additions and 85 deletions
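
The cleanup applies one convention across the files below: the standard-library "errors" import and fmt.Errorf-style formatting give way to github.com/pkg/errors imported without an alias, k8s.io/apimachinery/pkg/api/errors keeps its apierrors alias, and k8s.io/apimachinery/pkg/util/errors is consistently aliased errorsutil. The sketch below is not part of the PR; the package and function names are made up purely to show how the three packages coexist under those names without clashing.

package example

import (
    "github.com/pkg/errors"                          // error construction and wrapping, used as plain "errors"
    apierrors "k8s.io/apimachinery/pkg/api/errors"   // Kubernetes API status errors (IsNotFound, NewNotFound, ...)
    errorsutil "k8s.io/apimachinery/pkg/util/errors" // aggregation of multiple errors
)

// runSteps is a hypothetical helper that exercises all three packages side by side.
func runSteps(steps []func() error) error {
    var errs []error
    for i, step := range steps {
        if err := step(); err != nil {
            if apierrors.IsNotFound(err) {
                continue // tolerate missing API objects in this sketch
            }
            errs = append(errs, errors.Wrapf(err, "step %d failed", i))
        }
    }
    return errorsutil.NewAggregate(errs)
}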

View File

@@ -18,11 +18,11 @@ package alpha
 import (
     "bufio"
-    "errors"
     "fmt"
     "io"
     "strings"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
     kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"

View File

@@ -17,9 +17,10 @@ limitations under the License.
 package phases
 import (
-    "errors"
     "fmt"
+    "github.com/pkg/errors"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
     "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"

View File

@@ -17,9 +17,9 @@ limitations under the License.
 package workflow
 import (
-    "errors"
     "fmt"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
 )

View File

@@ -225,7 +225,7 @@ func getEtcdDataDir(manifestPath string, client clientset.Interface) (string, er
         }
     }
     if dataDir == "" {
-        return dataDir, fmt.Errorf("invalid etcd pod manifest")
+        return dataDir, errors.New("invalid etcd pod manifest")
     }
     return dataDir, nil
 }

View File

@@ -25,7 +25,7 @@ import (
     "testing"
     "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
@@ -97,7 +97,7 @@ func TestRunCreateToken(t *testing.T) {
     var buf bytes.Buffer
     fakeClient := &fake.Clientset{}
     fakeClient.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        return true, nil, errors.NewNotFound(v1.Resource("secrets"), "foo")
+        return true, nil, apierrors.NewNotFound(v1.Resource("secrets"), "foo")
     })
     testCases := []struct {

View File

@@ -25,6 +25,7 @@ import (
     "strings"
     "github.com/pkg/errors"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"

View File

@@ -21,6 +21,7 @@ import (
     "io/ioutil"
     "github.com/pkg/errors"
     "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -23,6 +23,7 @@ import (
     "github.com/mholt/caddy/caddyfile"
     "github.com/pkg/errors"
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     rbac "k8s.io/api/rbac/v1"

View File

@@ -130,7 +130,7 @@ func CreateCACertAndKeyFiles(certSpec *KubeadmCert, cfg *kubeadmapi.InitConfigur
 func NewCSR(certSpec *KubeadmCert, cfg *kubeadmapi.InitConfiguration) (*x509.CertificateRequest, *rsa.PrivateKey, error) {
     certConfig, err := certSpec.GetConfig(cfg)
     if err != nil {
-        return nil, nil, fmt.Errorf("failed to retrieve cert configuration: %v", err)
+        return nil, nil, errors.Wrap(err, "failed to retrieve cert configuration")
     }
     return pkiutil.NewCSRAndKey(certConfig)
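
Hunks like the one above do more than swap an import: fmt.Errorf("...: %v", err) flattens the underlying error into a string, while errors.Wrap keeps the cause attached so callers can still inspect it. A minimal standalone sketch (not taken from the PR) of the difference:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

var errNotReady = errors.New("component not ready")

func main() {
    flattened := fmt.Errorf("failed to retrieve cert configuration: %v", errNotReady)
    wrapped := errors.Wrap(errNotReady, "failed to retrieve cert configuration")

    // Both render the same message.
    fmt.Println(flattened)
    fmt.Println(wrapped)

    // Only the wrapped error still exposes the original cause.
    fmt.Println(errors.Cause(wrapped) == errNotReady)   // true
    fmt.Println(errors.Cause(flattened) == errNotReady) // false
}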

View File

@@ -54,6 +54,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )

View File

@@ -23,6 +23,7 @@ import (
     "path/filepath"
     "github.com/pkg/errors"
     "k8s.io/api/core/v1"
     rbac "k8s.io/api/rbac/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"

View File

@@ -18,12 +18,13 @@ package kubelet
 import (
     "context"
-    "errors"
     "io"
     "reflect"
     "strings"
     "testing"
+    "github.com/pkg/errors"
     "k8s.io/api/core/v1"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/utils/exec"

View File

@@ -23,10 +23,11 @@ import (
     "path/filepath"
     "time"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/version"
     clientset "k8s.io/client-go/kubernetes"
     certutil "k8s.io/client-go/util/cert"
@@ -61,7 +62,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
     // Create the new, version-branched kubelet ComponentConfig ConfigMap
     if err := kubeletphase.CreateConfigMap(cfg, client); err != nil {
-        errs = append(errs, pkgerrors.Wrap(err, "error creating kubelet configuration ConfigMap"))
+        errs = append(errs, errors.Wrap(err, "error creating kubelet configuration ConfigMap"))
     }
     // Write the new kubelet config down to disk and the env file if needed
@@ -73,7 +74,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
     // --cri-socket.
     // TODO: In the future we want to use something more official like NodeStatus or similar for detecting this properly
     if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
-        errs = append(errs, pkgerrors.Wrap(err, "error uploading crisocket"))
+        errs = append(errs, errors.Wrap(err, "error uploading crisocket"))
     }
     // Create/update RBAC rules that makes the bootstrap tokens able to post CSRs
@@ -118,7 +119,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
     if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
         errs = append(errs, err)
     }
-    return errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(errs)
 }
 func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.InitConfiguration, client clientset.Interface, dryRun bool) error {
@@ -138,7 +139,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.InitConfiguration,
             return err
         }
         if dnsDeployment.Status.ReadyReplicas == 0 {
-            return pkgerrors.New("the DNS deployment isn't ready yet")
+            return errors.New("the DNS deployment isn't ready yet")
         }
     }
@@ -158,7 +159,7 @@ func BackupAPIServerCertIfNeeded(cfg *kubeadmapi.InitConfiguration, dryRun bool)
     shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
     if err != nil {
         // Don't fail the upgrade phase if failing to determine to backup kube-apiserver cert and key.
-        return pkgerrors.Wrap(err, "[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key")
+        return errors.Wrap(err, "[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key")
     }
     if !shouldBackup {
@@ -196,7 +197,7 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon
         // *would* post the new kubelet-config-1.X configmap that doesn't exist now when we're trying to download it
         // again.
         if !(apierrors.IsNotFound(err) && dryRun) {
-            errs = append(errs, pkgerrors.Wrap(err, "error downloading kubelet configuration from the ConfigMap"))
+            errs = append(errs, errors.Wrap(err, "error downloading kubelet configuration from the ConfigMap"))
         }
     }
@@ -210,14 +211,14 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon
         // as we handle that ourselves in the markmaster phase
         // TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
         if err := kubeletphase.WriteKubeletDynamicEnvFile(cfg, false, kubeletDir); err != nil {
-            errs = append(errs, pkgerrors.Wrap(err, "error writing a dynamic environment file for the kubelet"))
+            errs = append(errs, errors.Wrap(err, "error writing a dynamic environment file for the kubelet"))
         }
         if dryRun { // Print what contents would be written
             dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletEnvFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout)
         }
     }
-    return errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(errs)
 }
 // getKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not.
@@ -233,7 +234,7 @@ func getKubeletDir(dryRun bool) (string, error) {
 func backupAPIServerCertAndKey(certAndKeyDir string) error {
     subDir := filepath.Join(certAndKeyDir, "expired")
     if err := os.Mkdir(subDir, 0766); err != nil {
-        return pkgerrors.Wrapf(err, "failed to created backup directory %s", subDir)
+        return errors.Wrapf(err, "failed to created backup directory %s", subDir)
     }
     filesToMove := map[string]string{
@@ -263,7 +264,7 @@ func rollbackFiles(files map[string]string, originalErr error) error {
             errs = append(errs, err)
         }
     }
-    return pkgerrors.Errorf("couldn't move these files: %v. Got errors: %v", files, errors.NewAggregate(errs))
+    return errors.Errorf("couldn't move these files: %v. Got errors: %v", files, errorsutil.NewAggregate(errs))
 }
 // shouldBackupAPIServerCertAndKey checks if the cert of kube-apiserver will be expired in 180 days.
@@ -271,10 +272,10 @@ func shouldBackupAPIServerCertAndKey(certAndKeyDir string) (bool, error) {
     apiServerCert := filepath.Join(certAndKeyDir, kubeadmconstants.APIServerCertName)
     certs, err := certutil.CertsFromFile(apiServerCert)
     if err != nil {
-        return false, pkgerrors.Wrapf(err, "couldn't load the certificate file %s", apiServerCert)
+        return false, errors.Wrapf(err, "couldn't load the certificate file %s", apiServerCert)
     }
     if len(certs) == 0 {
-        return false, pkgerrors.New("no certificate data found")
+        return false, errors.New("no certificate data found")
     }
     if time.Now().Sub(certs[0].NotBefore) > expiry {
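
This file also switches the aggregate helper from the unaliased k8s.io/apimachinery/pkg/util/errors import to the errorsutil alias, freeing the errors name for github.com/pkg/errors. The accumulate-then-aggregate pattern seen in PerformPostUpgradeTasks and writeKubeletConfigFiles works without an explicit "no errors" check because NewAggregate collapses an empty slice to nil; a standalone sketch, not part of the PR:

package main

import (
    "fmt"

    "github.com/pkg/errors"
    errorsutil "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    var errs []error

    // No failures recorded: the aggregate is nil, so callers see success.
    fmt.Println(errorsutil.NewAggregate(errs) == nil) // true

    // With failures, a single error carrying both messages comes back.
    errs = append(errs, errors.New("first failure"), errors.New("second failure"))
    fmt.Println(errorsutil.NewAggregate(errs)) // prints both messages as one aggregated error
}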

View File

@@ -17,13 +17,14 @@ limitations under the License.
 package upgrade
 import (
-    "errors"
     "os"
     "path/filepath"
     "strings"
     "testing"
     "time"
+    "github.com/pkg/errors"
     kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"

View File

@@ -20,6 +20,8 @@ import (
     "encoding/json"
     "fmt"
+    "github.com/pkg/errors"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
@@ -29,8 +31,6 @@ import (
     "k8s.io/client-go/rest"
     core "k8s.io/client-go/testing"
     "k8s.io/client-go/tools/clientcmd"
-    "github.com/pkg/errors"
 )
 // ClientBackedDryRunGetter implements the DryRunGetter interface for use in NewDryRunClient() and proxies all GET and LIST requests to the backing API server reachable via rest.Config

View File

@@ -21,6 +21,7 @@ import (
     "fmt"
     "github.com/pkg/errors"
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     rbac "k8s.io/api/rbac/v1"

View File

@@ -23,6 +23,7 @@ import (
     "time"
     "github.com/pkg/errors"
     "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -25,6 +25,7 @@ import (
     "strings"
     "github.com/pkg/errors"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"

View File

@@ -24,7 +24,7 @@ import (
     "time"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
 )
@@ -76,7 +76,7 @@ func PrintDryRunFiles(files []FileToPrint, w io.Writer) error {
         fmt.Fprintf(w, "[dryrun] Would write file %q with content:\n", outputFilePath)
         apiclient.PrintBytesWithLinePrefix(w, fileBytes, "\t")
     }
-    return errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(errs)
 }
 // Waiter is an implementation of apiclient.Waiter that should be used for dry-running

View File

@@ -21,7 +21,7 @@ import (
     "os"
     "strings"
-    utilerrors "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
 )
 const (
@@ -69,7 +69,7 @@ func checkErr(err error, handleErr func(string, int)) {
         return
     case preflightError:
         handleErr(err.Error(), PreFlightExitCode)
-    case utilerrors.Aggregate:
+    case errorsutil.Aggregate:
         handleErr(err.Error(), ValidationExitCode)
     default:

View File

@@ -21,13 +21,13 @@ import (
     "bytes"
     "io"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
     "sigs.k8s.io/yaml"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/runtime/serializer"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
     utilyaml "k8s.io/apimachinery/pkg/util/yaml"
     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -45,7 +45,7 @@ func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs s
     mediaType := "application/yaml"
     info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
     if !ok {
-        return []byte{}, pkgerrors.Errorf("unsupported media type %q", mediaType)
+        return []byte{}, errors.Errorf("unsupported media type %q", mediaType)
     }
     encoder := codecs.EncoderForVersion(info.Serializer, gv)
@@ -64,7 +64,7 @@ func UnmarshalFromYamlForCodecs(buffer []byte, gv schema.GroupVersion, codecs se
     mediaType := "application/yaml"
     info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
     if !ok {
-        return nil, pkgerrors.Errorf("unsupported media type %q", mediaType)
+        return nil, errors.Errorf("unsupported media type %q", mediaType)
     }
     decoder := codecs.DecoderToVersion(info.Serializer, gv)
@@ -97,12 +97,12 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e
         }
         // Require TypeMeta information to be present
         if len(typeMetaInfo.APIVersion) == 0 || len(typeMetaInfo.Kind) == 0 {
-            errs = append(errs, pkgerrors.New("invalid configuration: kind and apiVersion is mandatory information that needs to be specified in all YAML documents"))
+            errs = append(errs, errors.New("invalid configuration: kind and apiVersion is mandatory information that needs to be specified in all YAML documents"))
             continue
         }
         // Check whether the kind has been registered before. If it has, throw an error
         if known := knownKinds[typeMetaInfo.Kind]; known {
-            errs = append(errs, pkgerrors.Errorf("invalid configuration: kind %q is specified twice in YAML file", typeMetaInfo.Kind))
+            errs = append(errs, errors.Errorf("invalid configuration: kind %q is specified twice in YAML file", typeMetaInfo.Kind))
             continue
         }
         knownKinds[typeMetaInfo.Kind] = true
@@ -110,7 +110,7 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e
         // Build a GroupVersionKind object from the deserialized TypeMeta object
         gv, err := schema.ParseGroupVersion(typeMetaInfo.APIVersion)
         if err != nil {
-            errs = append(errs, pkgerrors.Wrap(err, "unable to parse apiVersion"))
+            errs = append(errs, errors.Wrap(err, "unable to parse apiVersion"))
             continue
         }
         gvk := gv.WithKind(typeMetaInfo.Kind)
@@ -118,7 +118,7 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e
         // Save the mapping between the gvk and the bytes that object consists of
         gvkmap[gvk] = b
     }
-    if err := errors.NewAggregate(errs); err != nil {
+    if err := errorsutil.NewAggregate(errs); err != nil {
         return nil, err
     }
     return gvkmap, nil

View File

@@ -459,12 +459,12 @@ func EncodeCSRPEM(csr *x509.CertificateRequest) []byte {
 func parseCSRPEM(pemCSR []byte) (*x509.CertificateRequest, error) {
     block, _ := pem.Decode(pemCSR)
     if block == nil {
-        return nil, fmt.Errorf("data doesn't contain a valid certificate request")
+        return nil, errors.New("data doesn't contain a valid certificate request")
     }
     if block.Type != certutil.CertificateRequestBlockType {
         var block *pem.Block
-        return nil, fmt.Errorf("expected block type %q, but PEM had type %v", certutil.CertificateRequestBlockType, block.Type)
+        return nil, errors.Errorf("expected block type %q, but PEM had type %v", certutil.CertificateRequestBlockType, block.Type)
     }
     return x509.ParseCertificateRequest(block.Bytes)
@@ -480,7 +480,7 @@ func CertificateRequestFromFile(file string) (*x509.CertificateRequest, error) {
     csr, err := parseCSRPEM(pemBlock)
     if err != nil {
-        return nil, fmt.Errorf("error reading certificate request file %s: %v", file, err)
+        return nil, errors.Wrapf(err, "error reading certificate request file %s", file)
     }
     return csr, nil
 }

View File

@@ -21,9 +21,9 @@ import (
     goruntime "runtime"
     "strings"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
     kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
     utilsexec "k8s.io/utils/exec"
 )
@@ -69,7 +69,7 @@ func NewContainerRuntime(execer utilsexec.Interface, criSocket string) (Containe
     }
     if _, err := execer.LookPath(toolName); err != nil {
-        return nil, pkgerrors.Wrapf(err, "%s is required for container runtime", toolName)
+        return nil, errors.Wrapf(err, "%s is required for container runtime", toolName)
     }
     return runtime, nil
@@ -88,7 +88,7 @@ func (runtime *DockerRuntime) IsDocker() bool {
 // IsRunning checks if runtime is running
 func (runtime *CRIRuntime) IsRunning() error {
     if out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "info").CombinedOutput(); err != nil {
-        return pkgerrors.Wrapf(err, "container runtime is not running: output: %s, error", string(out))
+        return errors.Wrapf(err, "container runtime is not running: output: %s, error", string(out))
     }
     return nil
 }
@@ -96,7 +96,7 @@ func (runtime *CRIRuntime) IsRunning() error {
 // IsRunning checks if runtime is running
 func (runtime *DockerRuntime) IsRunning() error {
     if out, err := runtime.exec.Command("docker", "info").CombinedOutput(); err != nil {
-        return pkgerrors.Wrapf(err, "container runtime is not running: output: %s, error", string(out))
+        return errors.Wrapf(err, "container runtime is not running: output: %s, error", string(out))
     }
     return nil
 }
@@ -105,7 +105,7 @@ func (runtime *DockerRuntime) IsRunning() error {
 func (runtime *CRIRuntime) ListKubeContainers() ([]string, error) {
     out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "pods", "-q").CombinedOutput()
     if err != nil {
-        return nil, pkgerrors.Wrapf(err, "output: %s, error", string(out))
+        return nil, errors.Wrapf(err, "output: %s, error", string(out))
     }
     pods := []string{}
     for _, pod := range strings.Fields(string(out)) {
@@ -127,15 +127,15 @@ func (runtime *CRIRuntime) RemoveContainers(containers []string) error {
         out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "stopp", container).CombinedOutput()
         if err != nil {
             // don't stop on errors, try to remove as many containers as possible
-            errs = append(errs, pkgerrors.Wrapf(err, "failed to stop running pod %s: output: %s, error", container, string(out)))
+            errs = append(errs, errors.Wrapf(err, "failed to stop running pod %s: output: %s, error", container, string(out)))
         } else {
             out, err = runtime.exec.Command("crictl", "-r", runtime.criSocket, "rmp", container).CombinedOutput()
             if err != nil {
-                errs = append(errs, pkgerrors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out)))
+                errs = append(errs, errors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out)))
             }
         }
     }
-    return errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(errs)
 }
 // RemoveContainers removes running containers
@@ -145,17 +145,17 @@ func (runtime *DockerRuntime) RemoveContainers(containers []string) error {
         out, err := runtime.exec.Command("docker", "rm", "--force", "--volumes", container).CombinedOutput()
         if err != nil {
             // don't stop on errors, try to remove as many containers as possible
-            errs = append(errs, pkgerrors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out)))
+            errs = append(errs, errors.Wrapf(err, "failed to remove running container %s: output: %s, error", container, string(out)))
         }
     }
-    return errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(errs)
 }
 // PullImage pulls the image
 func (runtime *CRIRuntime) PullImage(image string) error {
     out, err := runtime.exec.Command("crictl", "-r", runtime.criSocket, "pull", image).CombinedOutput()
     if err != nil {
-        return pkgerrors.Wrapf(err, "output: %s, error", string(out))
+        return errors.Wrapf(err, "output: %s, error", string(out))
     }
     return nil
 }
@@ -164,7 +164,7 @@ func (runtime *CRIRuntime) PullImage(image string) error {
 func (runtime *DockerRuntime) PullImage(image string) error {
     out, err := runtime.exec.Command("docker", "pull", image).CombinedOutput()
     if err != nil {
-        return pkgerrors.Wrapf(err, "output: %s, error", string(out))
+        return errors.Wrapf(err, "output: %s, error", string(out))
     }
     return nil
 }

View File

@@ -29,10 +29,10 @@ import (
     "regexp"
     "strings"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
-    "k8s.io/klog"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
+    "k8s.io/klog"
 )
 var _ Validator = &KernelValidator{}
@@ -66,7 +66,7 @@ func (k *KernelValidator) Validate(spec SysSpec) (error, error) {
     helper := KernelValidatorHelperImpl{}
     release, err := helper.GetKernelReleaseVersion()
     if err != nil {
-        return nil, pkgerrors.Wrap(err, "failed to get kernel release")
+        return nil, errors.Wrap(err, "failed to get kernel release")
     }
     k.kernelRelease = release
     var errs []error
@@ -75,7 +75,7 @@ func (k *KernelValidator) Validate(spec SysSpec) (error, error) {
     if len(spec.KernelSpec.Required) > 0 || len(spec.KernelSpec.Forbidden) > 0 || len(spec.KernelSpec.Optional) > 0 {
         errs = append(errs, k.validateKernelConfig(spec.KernelSpec))
     }
-    return nil, errors.NewAggregate(errs)
+    return nil, errorsutil.NewAggregate(errs)
 }
 // validateKernelVersion validates the kernel version.
@@ -89,14 +89,14 @@ func (k *KernelValidator) validateKernelVersion(kSpec KernelSpec) error {
         }
     }
     k.Reporter.Report("KERNEL_VERSION", k.kernelRelease, bad)
-    return pkgerrors.Errorf("unsupported kernel release: %s", k.kernelRelease)
+    return errors.Errorf("unsupported kernel release: %s", k.kernelRelease)
 }
 // validateKernelConfig validates the kernel configurations.
 func (k *KernelValidator) validateKernelConfig(kSpec KernelSpec) error {
     allConfig, err := k.getKernelConfig()
     if err != nil {
-        return pkgerrors.Wrap(err, "failed to parse kernel config")
+        return errors.Wrap(err, "failed to parse kernel config")
     }
     return k.validateCachedKernelConfig(allConfig, kSpec)
 }
@@ -165,7 +165,7 @@ func (k *KernelValidator) validateCachedKernelConfig(allConfig map[string]kConfi
         validateOpt(config, forbidden)
     }
     if len(badConfigs) > 0 {
-        return pkgerrors.Errorf("unexpected kernel config: %s", strings.Join(badConfigs, " "))
+        return errors.Errorf("unexpected kernel config: %s", strings.Join(badConfigs, " "))
     }
     return nil
 }
@@ -220,14 +220,14 @@ func (k *KernelValidator) getKernelConfigReader() (io.Reader, error) {
         // config module and check again.
         output, err := exec.Command(modprobeCmd, configsModule).CombinedOutput()
         if err != nil {
-            return nil, pkgerrors.Wrapf(err, "unable to load kernel module: %q, output: %q, err",
+            return nil, errors.Wrapf(err, "unable to load kernel module: %q, output: %q, err",
                 configsModule, output)
         }
         // Unload the kernel config module to make sure the validation have no side effect.
         defer exec.Command(modprobeCmd, "-r", configsModule).Run()
         loadModule = true
     }
-    return nil, pkgerrors.Errorf("no config path in %v is available", possibePaths)
+    return nil, errors.Errorf("no config path in %v is available", possibePaths)
 }
 // getKernelConfig gets kernel config from kernel config file and convert kernel config to internal type.

View File

@@ -22,10 +22,10 @@ import (
     "os/exec"
     "strings"
-    "k8s.io/apimachinery/pkg/util/errors"
     "github.com/blang/semver"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/klog"
 )
@@ -46,7 +46,7 @@ func newPackageManager() (packageManager, error) {
     if m, ok := newDPKG(); ok {
         return m, nil
     }
-    return nil, pkgerrors.New("failed to find package manager")
+    return nil, errors.New("failed to find package manager")
 }
 // dpkg implements packageManager. It uses "dpkg-query" to retrieve package
@@ -68,11 +68,11 @@ func newDPKG() (packageManager, bool) {
 func (dpkg) getPackageVersion(packageName string) (string, error) {
     output, err := exec.Command("dpkg-query", "--show", "--showformat='${Version}'", packageName).Output()
     if err != nil {
-        return "", pkgerrors.Wrap(err, "dpkg-query failed")
+        return "", errors.Wrap(err, "dpkg-query failed")
     }
     version := extractUpstreamVersion(string(output))
     if version == "" {
-        return "", pkgerrors.New("no version information")
+        return "", errors.New("no version information")
     }
     return version, nil
 }
@@ -154,18 +154,18 @@ func (validator *packageValidator) validate(packageSpecs []PackageSpec, manager
         if versionRange(sv) {
             validator.reporter.Report(nameWithVerRange, version, good)
         } else {
-            errs = append(errs, pkgerrors.Errorf("package \"%s %s\" does not meet the spec \"%s (%s)\"", packageName, sv, packageName, spec.VersionRange))
+            errs = append(errs, errors.Errorf("package \"%s %s\" does not meet the spec \"%s (%s)\"", packageName, sv, packageName, spec.VersionRange))
             validator.reporter.Report(nameWithVerRange, version, bad)
         }
     }
-    return nil, errors.NewAggregate(errs)
+    return nil, errorsutil.NewAggregate(errs)
 }
 // getKernelRelease returns the kernel release of the local machine.
 func getKernelRelease() (string, error) {
     output, err := exec.Command("uname", "-r").Output()
     if err != nil {
-        return "", pkgerrors.Wrap(err, "failed to get kernel release")
+        return "", errors.Wrap(err, "failed to get kernel release")
     }
     return strings.TrimSpace(string(output)), nil
 }
@@ -175,7 +175,7 @@ func getOSDistro() (string, error) {
     f := "/etc/lsb-release"
     b, err := ioutil.ReadFile(f)
     if err != nil {
-        return "", pkgerrors.Wrapf(err, "failed to read %q", f)
+        return "", errors.Wrapf(err, "failed to read %q", f)
     }
     content := string(b)
     switch {
@@ -186,7 +186,7 @@ func getOSDistro() (string, error) {
     case strings.Contains(content, "CoreOS"):
         return "coreos", nil
     default:
-        return "", pkgerrors.Errorf("failed to get OS distro: %s", content)
+        return "", errors.Errorf("failed to get OS distro: %s", content)
     }
 }

View File

@@ -18,7 +18,8 @@ package system
 import (
     "fmt"
-    "k8s.io/apimachinery/pkg/util/errors"
+    errorsutil "k8s.io/apimachinery/pkg/util/errors"
 )
 // Validator is the interface for all validators.
@@ -46,7 +47,7 @@ func Validate(spec SysSpec, validators []Validator) (error, error) {
         errs = append(errs, err)
         warns = append(warns, warn)
     }
-    return errors.NewAggregate(warns), errors.NewAggregate(errs)
+    return errorsutil.NewAggregate(warns), errorsutil.NewAggregate(errs)
 }
 // ValidateSpec uses all default validators to validate the system and writes to stdout.

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package util
 import (
-    "errors"
     "fmt"
     "io/ioutil"
     "net/http"
@@ -25,7 +24,8 @@ import (
     "strings"
     "time"
-    pkgerrors "github.com/pkg/errors"
+    "github.com/pkg/errors"
     netutil "k8s.io/apimachinery/pkg/util/net"
     versionutil "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/klog"
@@ -111,7 +111,7 @@ func KubernetesReleaseVersion(version string) (string, error) {
         // Re-validate received version and return.
         return KubernetesReleaseVersion(body)
     }
-    return "", pkgerrors.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
+    return "", errors.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
 }
 // KubernetesVersionToImageTag is helper function that replaces all
@@ -152,7 +152,7 @@ func splitVersion(version string) (string, string, error) {
     var urlSuffix string
     subs := kubeBucketPrefixes.FindAllStringSubmatch(version, 1)
     if len(subs) != 1 || len(subs[0]) != 4 {
-        return "", "", pkgerrors.Errorf("invalid version %q", version)
+        return "", "", errors.Errorf("invalid version %q", version)
     }
     switch {
@@ -172,12 +172,12 @@ func fetchFromURL(url string, timeout time.Duration) (string, error) {
     client := &http.Client{Timeout: timeout, Transport: netutil.SetOldTransportDefaults(&http.Transport{})}
    resp, err := client.Get(url)
     if err != nil {
-        return "", pkgerrors.Errorf("unable to get URL %q: %s", url, err.Error())
+        return "", errors.Errorf("unable to get URL %q: %s", url, err.Error())
     }
     defer resp.Body.Close()
     body, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        return "", pkgerrors.Errorf("unable to read content of URL %q: %s", url, err.Error())
+        return "", errors.Errorf("unable to read content of URL %q: %s", url, err.Error())
     }
     bodyString := strings.TrimSpace(string(body))
@@ -192,7 +192,7 @@ func fetchFromURL(url string, timeout time.Duration) (string, error) {
 func kubeadmVersion(info string) (string, error) {
     v, err := versionutil.ParseSemantic(info)
     if err != nil {
-        return "", pkgerrors.Wrap(err, "kubeadm version error")
+        return "", errors.Wrap(err, "kubeadm version error")
     }
     // There is no utility in versionutil to get the version without the metadata,
     // so this needs some manual formatting.
@@ -226,11 +226,11 @@ func kubeadmVersion(info string) (string, error) {
 func validateStableVersion(remoteVersion, clientVersion string) (string, error) {
     verRemote, err := versionutil.ParseGeneric(remoteVersion)
     if err != nil {
-        return "", pkgerrors.Wrap(err, "remote version error")
+        return "", errors.Wrap(err, "remote version error")
     }
     verClient, err := versionutil.ParseGeneric(clientVersion)
     if err != nil {
-        return "", pkgerrors.Wrap(err, "client version error")
+        return "", errors.Wrap(err, "client version error")
     }
     // If the remote Major version is bigger or if the Major versions are the same,
     // but the remote Minor is bigger use the client version release. This handles Major bumps too.