Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Merge pull request #80318 from davidxia/fix-err-caps
cleanup: fix some log and error capitalizations
This commit is contained in: commit bf2dd03083
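The motivation is not spelled out in the diff itself, but it matches Go's published style guidance (and the golint / staticcheck ST1005 check): error strings should not be capitalized or end with punctuation, because callers usually wrap them into longer messages. A minimal sketch of the before/after pattern, using a hypothetical loadConfig helper that is not part of this change:

package main

import (
	"errors"
	"fmt"
)

// loadConfig is a hypothetical helper, used only to illustrate the convention
// applied throughout this commit: error strings start lowercase and carry no
// trailing punctuation, so they read naturally when wrapped by callers.
func loadConfig(path string) error {
	if path == "" {
		// before this kind of cleanup: errors.New("Path must not be empty")
		return errors.New("path must not be empty")
	}
	return nil
}

func main() {
	if err := loadConfig(""); err != nil {
		// Wrapping shows why the lowercase form reads better mid-sentence:
		// "failed to load config: path must not be empty"
		fmt.Printf("failed to load config: %v\n", err)
	}
}

In the hunks below only the strings passed to fmt.Errorf, errors.New, and errors.Errorf change; the surrounding log calls and code are unchanged context.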
@@ -79,14 +79,14 @@ func mountInChroot(rootfsPath string, args []string) error {
 // Mount failed because it is NFS V3 and we need to run rpcBind
 output, err = exec.Command(chrootCmd, rootfsPath, rpcBindCmd, "-w").CombinedOutput()
 if err != nil {
-return fmt.Errorf("Mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
+return fmt.Errorf("mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
 }

 // Rpcbind is running, try mounting again
 output, err = exec.Command(chrootCmd, args...).CombinedOutput()

 if err != nil {
-return fmt.Errorf("Mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
+return fmt.Errorf("mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
 }

 return nil
@@ -195,12 +195,12 @@ func (e *CombinedEtcdClient) AttachLease(leaseDuration time.Duration) error {
 defer v3client.Close()
 objectsResp, err := v3client.KV.Get(ctx, ttlKeysPrefix, clientv3.WithPrefix())
 if err != nil {
-return fmt.Errorf("Error while getting objects to attach to the lease")
+return fmt.Errorf("error while getting objects to attach to the lease")
 }

 lease, err := v3client.Lease.Grant(ctx, int64(leaseDuration/time.Second))
 if err != nil {
-return fmt.Errorf("Error while creating lease: %v", err)
+return fmt.Errorf("error while creating lease: %v", err)
 }
 klog.Infof("Lease with TTL: %v created", lease.TTL)

@@ -87,7 +87,7 @@ func (r *EtcdMigrateServer) Start(version *EtcdVersion) error {
 if err != nil {
 return fmt.Errorf("error killing etcd: %v", err)
 }
-return fmt.Errorf("Timed out waiting for etcd on port %d", r.cfg.port)
+return fmt.Errorf("timed out waiting for etcd on port %d", r.cfg.port)
 }
 }
 }
@@ -111,7 +111,7 @@ type EtcdVersionPair struct {
 func ParseEtcdVersionPair(s string) (*EtcdVersionPair, error) {
 parts := strings.Split(s, "/")
 if len(parts) != 2 {
-return nil, fmt.Errorf("Malformed version file, expected <major>.<minor>.<patch>/<storage> but got %s", s)
+return nil, fmt.Errorf("malformed version file, expected <major>.<minor>.<patch>/<storage> but got %s", s)
 }
 version, err := ParseEtcdVersion(parts[0])
 if err != nil {
@@ -393,7 +393,7 @@ func TestConfigChange(t *testing.T) {
 setUp := func() (*os.File, string, error) {
 tempDir, err := ioutil.TempDir("", "kubeproxy-config-change")
 if err != nil {
-return nil, "", fmt.Errorf("Unable to create temporary directory: %v", err)
+return nil, "", fmt.Errorf("unable to create temporary directory: %v", err)
 }
 fullPath := filepath.Join(tempDir, "kube-proxy-config")
 file, err := os.Create(fullPath)
@@ -138,10 +138,10 @@ func newCmdKubeletConfigEnableDynamic() *cobra.Command {
 Example: kubeletConfigEnableDynamicExample,
 Run: func(cmd *cobra.Command, args []string) {
 if len(nodeName) == 0 {
-kubeadmutil.CheckErr(errors.New("The --node-name argument is required"))
+kubeadmutil.CheckErr(errors.New("the --node-name argument is required"))
 }
 if len(kubeletVersionStr) == 0 {
-kubeadmutil.CheckErr(errors.New("The --kubelet-version argument is required"))
+kubeadmutil.CheckErr(errors.New("the --kubelet-version argument is required"))
 }

 kubeletVersion, err := version.ParseSemantic(kubeletVersionStr)
@@ -244,7 +244,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command {
 `), kubeadmapiv1beta2.SchemeGroupVersion, kubeadmapiv1beta2.SchemeGroupVersion),
 Run: func(cmd *cobra.Command, args []string) {
 if len(oldCfgPath) == 0 {
-kubeadmutil.CheckErr(errors.New("The --old-config flag is mandatory"))
+kubeadmutil.CheckErr(errors.New("the --old-config flag is mandatory"))
 }

 oldCfgBytes, err := ioutil.ReadFile(oldCfgPath)
@@ -321,7 +321,7 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co
 `), metav1.NamespaceSystem, constants.KubeadmConfigConfigMap),
 Run: func(cmd *cobra.Command, args []string) {
 if len(cfgPath) == 0 {
-kubeadmutil.CheckErr(errors.New("The --config flag is mandatory"))
+kubeadmutil.CheckErr(errors.New("the --config flag is mandatory"))
 }

 klog.V(1).Infoln("[config] retrieving ClientSet from file")
@@ -58,7 +58,7 @@ func runPreflight(c workflow.RunData) error {
 return err
 }
 if strings.ToLower(s.Text()) != "y" {
-return errors.New("Aborted reset operation")
+return errors.New("aborted reset operation")
 }
 }

@@ -428,7 +428,7 @@ func EtcdSupportedVersion(versionString string) (*version.Version, error) {
 }
 return etcdVersion, nil
 }
-return nil, errors.Errorf("Unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
+return nil, errors.Errorf("unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
 }

 // GetStaticPodDirectory returns the location on the disk where the Static Pod should be present
@@ -157,7 +157,7 @@ func TestEtcdSupportedVersion(t *testing.T) {
 {
 kubernetesVersion: "1.99.0",
 expectedVersion: nil,
-expectedError: errors.New("Unsupported or unknown Kubernetes version(1.99.0)"),
+expectedError: errors.New("unsupported or unknown Kubernetes version(1.99.0)"),
 },
 {
 kubernetesVersion: MinimumControlPlaneVersion.WithPatch(1).String(),
@@ -132,7 +132,7 @@ func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string,
 for {
 select {
 case <-timeoutChan:
-return errors.New("The prepull operation timed out")
+return errors.New("the prepull operation timed out")
 case result := <-stringChan:
 i++
 // If the cleanup function errors; error here as well
@@ -158,7 +158,7 @@ func TestMutateConfigMapWithConflict(t *testing.T) {
 update := action.(core.UpdateAction)
 if conflict > 0 {
 conflict--
-return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("Conflict"))
+return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("conflict"))
 }
 return false, update.GetObject(), nil
 })
@@ -98,10 +98,10 @@ func BuildAuthz(client authorizationclient.SubjectAccessReviewInterface, authz k
 return authorizerConfig.New()

 case "":
-return nil, fmt.Errorf("No authorization mode specified")
+return nil, fmt.Errorf("no authorization mode specified")

 default:
-return nil, fmt.Errorf("Unknown authorization mode %s", authz.Mode)
+return nil, fmt.Errorf("unknown authorization mode %s", authz.Mode)

 }
 }
@@ -418,7 +418,7 @@ func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan

 func checkPermissions() error {
 if uid := os.Getuid(); uid != 0 {
-return fmt.Errorf("Kubelet needs to run as uid `0`. It is being run as %d", uid)
+return fmt.Errorf("kubelet needs to run as uid `0`. It is being run as %d", uid)
 }
 // TODO: Check if kubelet is running in the `initial` user namespace.
 // http://man7.org/linux/man-pages/man7/user_namespaces.7.html
@@ -153,7 +153,7 @@ func (t tester) getKubetest(get bool, old time.Duration) (string, error) {
 log.Printf("The kubetest binary is older than %s.", old)
 }
 if t.goPath == "" {
-return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set")
+return "", fmt.Errorf("cannot install kubetest until $GOPATH is set")
 }
 log.Print("Updating kubetest binary...")
 cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"}
@@ -138,10 +138,10 @@ func TestParse(t *testing.T) {
 }

 func TestLook(t *testing.T) {
-lpf := errors.New("LookPath failed")
+lpf := errors.New("lookPath failed")
-sf := errors.New("Stat failed")
+sf := errors.New("stat failed")
-lpnc := errors.New("LookPath should not be called")
+lpnc := errors.New("lookPath should not be called")
-snc := errors.New("Stat should not be called")
+snc := errors.New("stat should not be called")
 cases := []struct {
 stat error
 lookPath error
@@ -324,7 +324,7 @@ func TestGetKubetest(t *testing.T) {
 stat: func(p string) (os.FileInfo, error) {
 // stat
 if p != c.stat {
-return nil, fmt.Errorf("Failed to find %s", p)
+return nil, fmt.Errorf("failed to find %s", p)
 }
 return FileInfo{time.Now().Add(c.age * -1)}, nil
 },
@@ -332,7 +332,7 @@ func TestGetKubetest(t *testing.T) {
 if c.path {
 return filepath.Join(p, name), nil
 }
-return "", fmt.Errorf("Not on path: %s", name)
+return "", fmt.Errorf("not on path: %s", name)
 },
 goPath: c.goPath,
 wait: func(cmd string, args ...string) error {
@@ -114,17 +114,17 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo
 // Look up the value
 value, ok := data[key]
 if !ok {
-return nil, false, fmt.Errorf("No key %s found", key)
+return nil, false, fmt.Errorf("no key %s found", key)
 }

 // Get the indexed value if an index is specified
 if index >= 0 {
 valueSlice, ok := value.([]interface{})
 if !ok {
-return nil, false, fmt.Errorf("Key %s did not hold a slice", key)
+return nil, false, fmt.Errorf("key %s did not hold a slice", key)
 }
 if index >= len(valueSlice) {
-return nil, false, fmt.Errorf("Index %d out of bounds for slice at key: %v", index, key)
+return nil, false, fmt.Errorf("index %d out of bounds for slice at key: %v", index, key)
 }
 value = valueSlice[index]
 }
@@ -135,7 +135,7 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo

 childData, ok := value.(map[string]interface{})
 if !ok {
-return nil, false, fmt.Errorf("Key %s did not hold a map", keys[0])
+return nil, false, fmt.Errorf("key %s did not hold a map", keys[0])
 }
 return getJSONValue(childData, keys[1:]...)
 }
@@ -148,7 +148,7 @@ func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.C
 return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor)
 }

-return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
+return "", fmt.Errorf("unsupported container resource : %v", fs.Resource)
 }

 // convertResourceCPUToString converts cpu value to the format of divisor and returns
@@ -1519,7 +1519,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName
 if lbaas.opts.ManageSecurityGroups {
 err := lbaas.EnsureSecurityGroupDeleted(clusterName, service)
 if err != nil {
-return fmt.Errorf("Failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
+return fmt.Errorf("failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
 }
 }

@@ -1536,7 +1536,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
 // It is OK when the security group has been deleted by others.
 return nil
 }
-return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
+return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
 }

 lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
@@ -1567,7 +1567,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
 for _, rule := range secGroupRules {
 res := rules.Delete(lbaas.network, rule.ID)
 if res.Err != nil && !isNotFound(res.Err) {
-return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
+return fmt.Errorf("error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
 }
 }
 }
@@ -211,11 +211,11 @@ func convertToCM(obj interface{}) (*v1.ConfigMap, error) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-return nil, fmt.Errorf("Couldn't get object from tombstone %#v", obj)
+return nil, fmt.Errorf("couldn't get object from tombstone %#v", obj)
 }
 cm, ok = tombstone.Obj.(*v1.ConfigMap)
 if !ok {
-return nil, fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj)
+return nil, fmt.Errorf("tombstone contained object that is not a ConfigMap %#v", obj)
 }
 }
 return cm, nil
@@ -87,7 +87,7 @@ func newCFSSLSigner(caFile, caKeyFile string, client clientset.Interface, certif

 priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
 if err != nil {
-return nil, fmt.Errorf("Malformed private key %v", err)
+return nil, fmt.Errorf("malformed private key %v", err)
 }
 return &cfsslSigner{
 priv: priv,
@@ -94,7 +94,7 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time
 starts := []time.Time{}
 sched, err := cron.ParseStandard(sj.Spec.Schedule)
 if err != nil {
-return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
+return starts, fmt.Errorf("unparseable schedule: %s : %s", sj.Spec.Schedule, err)
 }

 var earliestTime time.Time
@@ -49,7 +49,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v
 _, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
 maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
 if err != nil {
-return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
+return fmt.Errorf("couldn't get unavailable numbers: %v", err)
 }
 oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)

@@ -416,7 +416,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeL
 }
 maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
 if err != nil {
-return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
+return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
 }
 klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
 return maxUnavailable, numUnavailable, nil
@@ -857,7 +857,7 @@ func TestGarbageCollectorSync(t *testing.T) {

 // Simulate the discovery client returning an error
 fakeDiscoveryClient.setPreferredResources(nil)
-fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
+fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))

 // Wait until sync discovers the change
 time.Sleep(1 * time.Second)
@@ -199,7 +199,7 @@ func TestControllerSyncJob(t *testing.T) {
 },
 "too few active pods, with controller error": {
 2, 5, 6, false, 0,
-fmt.Errorf("Fake error"), true, 0, 1, 1, 0,
+fmt.Errorf("fake error"), true, 0, 1, 1, 0,
 1, 0, 1, 1, 0, nil, "",
 },
 "too many active pods": {
@@ -209,17 +209,17 @@ func TestControllerSyncJob(t *testing.T) {
 },
 "too many active pods, with controller error": {
 2, 5, 6, false, 0,
-fmt.Errorf("Fake error"), true, 0, 3, 0, 0,
+fmt.Errorf("fake error"), true, 0, 3, 0, 0,
 0, 1, 3, 0, 0, nil, "",
 },
 "failed + succeed pods: reset backoff delay": {
 2, 5, 6, false, 0,
-fmt.Errorf("Fake error"), true, 0, 1, 1, 1,
+fmt.Errorf("fake error"), true, 0, 1, 1, 1,
 1, 0, 1, 1, 1, nil, "",
 },
 "only new failed pod": {
 2, 5, 6, false, 0,
-fmt.Errorf("Fake error"), false, 0, 1, 0, 1,
+fmt.Errorf("fake error"), false, 0, 1, 0, 1,
 1, 0, 1, 0, 1, nil, "",
 },
 "job finish": {
@@ -575,7 +575,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 manager.podControl = &fakePodControl
 manager.podStoreSynced = alwaysReady
 manager.jobStoreSynced = alwaysReady
-updateError := fmt.Errorf("Update error")
+updateError := fmt.Errorf("update error")
 manager.updateHandler = func(job *batch.Job) error {
 manager.queue.AddRateLimited(testutil.GetKey(job, t))
 return updateError
@@ -101,7 +101,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
 case CloudAllocatorType:
 return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
 default:
-return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType)
+return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
 }
 }

@@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
 }
 return true, nil
 }); pollErr != nil {
-return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map",
+return nil, fmt.Errorf("failed to list all nodes in %v, cannot proceed without updating CIDR map",
 apiserverStartupGracePeriod)
 }
 return nodeList, nil
@@ -162,7 +162,7 @@ func (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err
 var ipSize int

 if cidr == nil {
-return -1, -1, fmt.Errorf("Error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
+return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
 }

 if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {
@@ -276,12 +276,12 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
 for idx, cidr := range node.Spec.PodCIDRs {
 _, podCIDR, err := net.ParseCIDR(cidr)
 if err != nil {
-return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
+return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
 }

 klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name)
 if err = r.cidrSets[idx].Release(podCIDR); err != nil {
-return fmt.Errorf("Error when releasing CIDR %v: %v", cidr, err)
+return fmt.Errorf("error when releasing CIDR %v: %v", cidr, err)
 }
 }
 return nil
@@ -278,7 +278,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
 // If all metrics are invalid return error and set condition on hpa based on first invalid metric.
 if invalidMetricsCount >= len(metricSpecs) {
 setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
-return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
+return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
 }
 setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
 return replicas, metric, statuses, timestamp, nil
@@ -326,7 +326,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 rsSpec.Status.ReadyReplicas = 1
 rsSpec.Status.AvailableReplicas = 1
 fakePodControl.Clear()
-fakePodControl.Err = fmt.Errorf("Fake Error")
+fakePodControl.Err = fmt.Errorf("fake Error")

 manager.syncReplicaSet(GetKey(rsSpec, t))
 validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
@@ -670,7 +670,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 fakeClient := &fake.Clientset{}
 fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
 fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
-return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
+return true, &apps.ReplicaSet{}, fmt.Errorf("fake error")
 })
 fakeRSClient := fakeClient.AppsV1().ReplicaSets("default")
 numReplicas := int32(10)
@@ -1136,11 +1136,11 @@ func TestPatchPodFails(t *testing.T) {
 informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
 // let both patches fail. The rs controller will assume it fails to take
 // control of the pods and requeue to try again.
-fakePodControl.Err = fmt.Errorf("Fake Error")
+fakePodControl.Err = fmt.Errorf("fake Error")
 rsKey := GetKey(rs, t)
 err := processSync(manager, rsKey)
-if err == nil || !strings.Contains(err.Error(), "Fake Error") {
+if err == nil || !strings.Contains(err.Error(), "fake Error") {
-t.Errorf("expected Fake Error, got %+v", err)
+t.Errorf("expected fake Error, got %+v", err)
 }
 // 2 patches to take control of pod1 and pod2 (both fail).
 validateSyncReplicaSet(t, fakePodControl, 0, 0, 2)
@@ -1086,7 +1086,7 @@ func TestDiscoverySync(t *testing.T) {

 // Simulate the discovery client returning an error
 fakeDiscoveryClient.setPreferredResources(nil)
-fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
+fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))

 // Wait until sync discovers the change
 time.Sleep(1 * time.Second)
@@ -621,7 +621,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
 // with various kubernetes errors.
 func TestProcessServiceCreateOrUpdateK8sError(t *testing.T) {
 svcName := "svc-k8s-err"
-conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("Object conflict"))
+conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("object conflict"))
 notFoundErr := apierrors.NewNotFound(schema.GroupResource{}, svcName)

 testCases := []struct {
@@ -710,7 +710,7 @@ func TestSyncService(t *testing.T) {
 srv := controller.cache.getOrCreate("external-balancer")
 srv.state = defaultExternalService()
 },
-expectedErr: fmt.Errorf("Service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
+expectedErr: fmt.Errorf("service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
 },
 */

@@ -780,12 +780,12 @@ func TestProcessServiceDeletion(t *testing.T) {

 svc := controller.cache.getOrCreate(svcKey)
 svc.state = defaultExternalService()
-cloud.Err = fmt.Errorf("Error Deleting the Loadbalancer")
+cloud.Err = fmt.Errorf("error Deleting the Loadbalancer")

 },
 expectedFn: func(svcErr error) error {

-expectedError := "Error Deleting the Loadbalancer"
+expectedError := "error Deleting the Loadbalancer"

 if svcErr == nil || svcErr.Error() != expectedError {
 return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr)
@@ -1110,7 +1110,7 @@ func TestServiceCache(t *testing.T) {
 return fmt.Errorf("is Available Expected=true Obtained=%v", bool)
 }
 if Cs == nil {
-return fmt.Errorf("CachedService expected:non-nil Obtained=nil")
+return fmt.Errorf("cachedService expected:non-nil Obtained=nil")
 }
 return nil
 },
@@ -1125,7 +1125,7 @@ func TestServiceCache(t *testing.T) {
 //It should have two elements
 keys := sc.ListKeys()
 if len(keys) != 2 {
-return fmt.Errorf("Elementes Expected=2 Obtained=%v", len(keys))
+return fmt.Errorf("elements Expected=2 Obtained=%v", len(keys))
 }
 return nil
 },
@@ -247,7 +247,7 @@ func TestTokenCreation(t *testing.T) {
 return func(core.Action) (bool, runtime.Object, error) {
 i++
 if i < 3 {
-return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
+return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
 }
 return false, nil, nil
 }
@@ -278,7 +278,7 @@ func TestTokenCreation(t *testing.T) {
 resource: "secrets",
 reactor: func(t *testing.T) core.ReactionFunc {
 return func(core.Action) (bool, runtime.Object, error) {
-return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
+return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
 }
 },
 }},
@@ -184,13 +184,13 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
 case apierrors.IsNotFound(err):
 _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
 if err != nil {
-errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err))
+errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
 }
 if err == nil || !apierrors.IsAlreadyExists(err) {
 spc.recordClaimEvent("create", set, pod, &claim, err)
 }
 case err != nil:
-errs = append(errs, fmt.Errorf("Failed to retrieve PVC %s: %s", claim.Name, err))
+errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
 spc.recordClaimEvent("create", set, pod, &claim, err)
 }
 // TODO: Check resource requirements and accessmodes, update if necessary
@@ -205,7 +205,7 @@ func TestStatefulPodControlNoOpUpdate(t *testing.T) {
 control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
 fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
 t.Error("no-op update should not make any client invocation")
-return true, nil, apierrors.NewInternalError(errors.New("If we are here we have a problem"))
+return true, nil, apierrors.NewInternalError(errors.New("if we are here we have a problem"))
 })
 if err := control.UpdateStatefulPod(set, pod); err != nil {
 t.Errorf("Error returned on no-op update error: %s", err)
@@ -1774,7 +1774,7 @@ func assertMonotonicInvariants(set *apps.StatefulSet, spc *fakeStatefulPodContro
 sort.Sort(ascendingOrdinal(pods))
 for ord := 0; ord < len(pods); ord++ {
 if ord > 0 && isRunningAndReady(pods[ord]) && !isRunningAndReady(pods[ord-1]) {
-return fmt.Errorf("Successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
+return fmt.Errorf("successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
 }

 if getOrdinal(pods[ord]) != ord {
@@ -95,7 +95,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflict(t *testing.T) {
 update := action.(core.UpdateAction)
 if !conflict {
 conflict = true
-return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
+return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
 }
 return true, update.GetObject(), nil

@@ -118,7 +118,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure(t *testing.T) {
 updater := NewRealStatefulSetStatusUpdater(fakeClient, setLister)
 fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
 update := action.(core.UpdateAction)
-return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
+return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
 })
 if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
 t.Error("UpdateStatefulSetStatus failed to return an error on get failure")
@@ -247,7 +247,7 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
 }

 if !found {
-return nil, fmt.Errorf("Not found node %v", node)
+return nil, fmt.Errorf("not found node %v", node)
 }

 origNodeCopy.Status = node.Status
@@ -267,7 +267,7 @@ func needsCleanup(j *batch.Job) bool {

 func getFinishAndExpireTime(j *batch.Job) (*time.Time, *time.Time, error) {
 if !needsCleanup(j) {
-return nil, nil, fmt.Errorf("Job %s/%s should not be cleaned up", j.Namespace, j.Name)
+return nil, nil, fmt.Errorf("job %s/%s should not be cleaned up", j.Namespace, j.Name)
 }
 finishAt, err := jobFinishTime(j)
 if err != nil {
@@ -357,7 +357,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(

 volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
 if err != nil {
-return fmt.Errorf("Failed to SetVolumeMountedByNode with error: %v", err)
+return fmt.Errorf("failed to SetVolumeMountedByNode with error: %v", err)
 }

 nodeObj.mountedByNode = mounted
@@ -390,7 +390,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(

 volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
 if err != nil {
-return 0, fmt.Errorf("Failed to set detach request time with error: %v", err)
+return 0, fmt.Errorf("failed to set detach request time with error: %v", err)
 }
 // If there is no previous detach request, set it to the current time
 if nodeObj.detachRequestedTime.IsZero() {