diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index ccc924efd78..5275129b791 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1673,8 +1673,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 	if !kl.podIsTerminated(pod) {
 		// Wait for volumes to attach/mount
 		if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
-			kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
-			klog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
+			kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err)
+			klog.Errorf("Unable to attach or mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
 			return err
 		}
 	}
diff --git a/pkg/kubelet/volumemanager/BUILD b/pkg/kubelet/volumemanager/BUILD
index f967754b53f..30666c1d4ca 100644
--- a/pkg/kubelet/volumemanager/BUILD
+++ b/pkg/kubelet/volumemanager/BUILD
@@ -61,6 +61,7 @@ go_test(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
diff --git a/pkg/kubelet/volumemanager/cache/BUILD b/pkg/kubelet/volumemanager/cache/BUILD
index 9d6a95049d6..688209d6e13 100644
--- a/pkg/kubelet/volumemanager/cache/BUILD
+++ b/pkg/kubelet/volumemanager/cache/BUILD
@@ -23,6 +23,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index 88a26e5b2cc..31c5de4e8ae 100644
--- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -21,6 +21,7 @@ caches in sync with the "ground truth".
 package populator
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -317,7 +318,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
 			uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue)
 		if err != nil {
 			klog.Errorf(
-				"Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v",
+				"Failed to add volume %s (specName: %s) for pod %q to desiredStateOfWorld: %v",
 				podVolume.Name,
 				volumeSpec.Name(),
 				uniquePodName,
@@ -497,7 +498,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 				podNamespace, pvcSource.ClaimName)
 		if err != nil {
 			return nil, nil, "", fmt.Errorf(
-				"error processing PVC %q/%q: %v",
+				"error processing PVC %s/%s: %v",
 				podNamespace,
 				pvcSource.ClaimName,
 				err)
@@ -516,7 +517,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 			dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
 		if err != nil {
 			return nil, nil, "", fmt.Errorf(
-				"error processing PVC %q/%q: %v",
+				"error processing PVC %s/%s: %v",
 				podNamespace,
 				pvcSource.ClaimName,
 				err)
@@ -539,20 +540,16 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 		// Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem
 		if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem {
 			return nil, nil, "", fmt.Errorf(
-				"Volume %q has volumeMode %q, but is specified in volumeMounts for pod %q/%q",
+				"volume %s has volumeMode %s, but is specified in volumeMounts",
 				podVolume.Name,
-				volumeMode,
-				podNamespace,
-				podName)
+				volumeMode)
 		}
 		// Error if a container has volumeDevices but the volumeMode of PVC isn't Block
 		if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock {
 			return nil, nil, "", fmt.Errorf(
-				"Volume %q has volumeMode %q, but is specified in volumeDevices for pod %q/%q",
+				"volume %s has volumeMode %s, but is specified in volumeDevices",
 				podVolume.Name,
-				volumeMode,
-				podNamespace,
-				podName)
+				volumeMode)
 		}
 	}
 	return pvc, volumeSpec, volumeGidValue, nil
@@ -573,11 +570,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 	pvc, err :=
 		dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
 	if err != nil || pvc == nil {
-		return nil, fmt.Errorf(
-			"failed to fetch PVC %s/%s from API server. err=%v",
-			namespace,
-			claimName,
-			err)
+		return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
 	}
 
 	if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
@@ -590,21 +583,15 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 		// It should happen only in very rare case when scheduler schedules
 		// a pod and user deletes a PVC that's used by it at the same time.
 		if pvc.ObjectMeta.DeletionTimestamp != nil {
-			return nil, fmt.Errorf(
-				"can't start pod because PVC %s/%s is being deleted",
-				namespace,
-				claimName)
+			return nil, errors.New("PVC is being deleted")
 		}
 	}
 
-	if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
-
-		return nil, fmt.Errorf(
-			"PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
-			namespace,
-			claimName,
-			pvc.Status.Phase,
-			pvc.Spec.VolumeName)
+	if pvc.Status.Phase != v1.ClaimBound {
+		return nil, errors.New("PVC is not bound")
+	}
+	if pvc.Spec.VolumeName == "" {
+		return nil, errors.New("PVC has empty pvc.Spec.VolumeName")
 	}
 
 	return pvc, nil
@@ -620,18 +607,18 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec(
 	pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})
 	if err != nil || pv == nil {
 		return nil, "", fmt.Errorf(
-			"failed to fetch PV %q from API server. err=%v", name, err)
+			"failed to fetch PV %s from API server: %v", name, err)
 	}
 
 	if pv.Spec.ClaimRef == nil {
 		return nil, "", fmt.Errorf(
-			"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
+			"found PV object %s but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
 			name)
 	}
 
 	if pv.Spec.ClaimRef.UID != expectedClaimUID {
 		return nil, "", fmt.Errorf(
-			"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
+			"found PV object %s but its pv.Spec.ClaimRef.UID %s does not point to claim.UID %s",
 			name,
 			pv.Spec.ClaimRef.UID,
 			expectedClaimUID)
diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go
index 7d3eb8b0b78..301916a3d13 100644
--- a/pkg/kubelet/volumemanager/volume_manager.go
+++ b/pkg/kubelet/volumemanager/volume_manager.go
@@ -376,12 +376,10 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 		}
 
 		return fmt.Errorf(
-			"failed to attach or mount for pod %q/%q: %s. List of unmounted volumes=%v, list of unattached volumes=%v.",
-			pod.Namespace,
-			pod.Name,
-			err,
+			"unmounted volumes=%v, unattached volumes=%v: %s",
 			unmountedVolumes,
-			unattachedVolumes)
+			unattachedVolumes,
+			err)
 	}
 
 	klog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
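The net effect of the patch is that pod identity is added to the message exactly once, by the outermost caller, instead of being repeated by every layer that wraps the error. A minimal standalone sketch of how the reworked messages nest (not kubelet code; the pod name and volume lists are made-up values for illustration):

package main

import (
	"errors"
	"fmt"
	"log"
)

func main() {
	// Innermost failure, as getPVCExtractPV now reports it: just the
	// condition, with no pod or namespace in the text.
	cause := errors.New("PVC is not bound")

	// WaitForAttachAndMount-style wrap: adds only the volume lists it owns.
	unmounted := []string{"data"}
	unattached := []string{"data"}
	err := fmt.Errorf("unmounted volumes=%v, unattached volumes=%v: %s",
		unmounted, unattached, cause)

	// syncPod-style log line: pod identity appears here, once.
	log.Printf("Unable to attach or mount volumes for pod %q: %v; skipping pod",
		"default/nginx", err)
	// Output resembles:
	// Unable to attach or mount volumes for pod "default/nginx": unmounted
	// volumes=[data], unattached volumes=[data]: PVC is not bound; skipping pod
}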