mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 04:06:03 +00:00
Don't report deletion of attached volume as warning
It is part of normal cluster operation. When a pod and PVC are deleted at the same time (e.g. because the whole namespace was deleted), the PV controller may try to delete a volume that is still attached. "Warning" is too strong here; the condition is going to heal relatively quickly.
This commit is contained in:
parent
cfdc365525
commit
ee85b6a579
@ -65,3 +65,13 @@ func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) err
|
||||
DevicePath: devicePath,
|
||||
}
|
||||
}
|
||||
|
||||
// IsDanglingError returns true if an error is DanglingAttachError
|
||||
func IsDanglingError(err error) bool {
|
||||
switch err.(type) {
|
||||
case *DanglingAttachError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -2578,6 +2578,10 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) {
|
||||
klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID)
|
||||
return false, nil
|
||||
}
|
||||
if volerr.IsDanglingError(err) {
|
||||
// The volume is still attached somewhere
|
||||
return false, volerr.NewDeletedVolumeInUseError(err.Error())
|
||||
}
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
@ -2598,7 +2602,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string)
|
||||
}
|
||||
|
||||
volumeState := aws.StringValue(info.State)
|
||||
opError := fmt.Sprintf("Error %s EBS volume %q", opName, disk.awsID)
|
||||
opError := fmt.Sprintf("error %s EBS volume %q", opName, disk.awsID)
|
||||
if len(instance) != 0 {
|
||||
opError = fmt.Sprintf("%q to instance %q", opError, instance)
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user