Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)
Merge pull request #94992 from lala123912/gxf
fix pkg/volume/util static checks
Commit: 978233775e
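The hunks below are mechanical cleanups of the kinds of findings Go linters such as staticcheck report in pkg/volume/util: error strings are lowercased (ST1005), Yoda-style comparisons are flipped so the variable comes first, redundant channel capacities and blank identifiers are dropped, trivial if/return-true/return-false bodies are collapsed, and a duplicate import alias is removed. The sketch below is a hypothetical illustration of those patterns, not code from this PR; names such as checkPortalCount and drainUntilStopped are invented for the example.

// Hypothetical illustration of the lint patterns fixed in this PR; not code from the PR itself.
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Error strings start lowercase and carry no trailing punctuation (ST1005).
func openMountsFile(name string) error {
	// before: fmt.Errorf("Cannot open mounts file %s", name)
	return fmt.Errorf("cannot open mounts file %s", name)
}

// Comparisons read variable-first instead of Yoda style.
func checkPortalCount(portalHostMap map[string]int) error {
	if portalHostMap == nil { // before: if nil == portalHostMap
		return errors.New("no portal host map returned")
	}
	if len(portalHostMap) != 1 { // before: if 1 != len(portalHostMap)
		return fmt.Errorf("wrong number of map entries: %d", len(portalHostMap))
	}
	return nil
}

// An if/return-true/return-false body collapses to returning the condition (S1008).
func isOperationNotPermitted(err error) bool {
	return strings.Contains(err.Error(), "Operation not permitted")
}

// A receive that discards its value drops the blank identifier (S1005).
func drainUntilStopped(stop <-chan struct{}, events <-chan string) {
	for {
		select {
		case <-stop: // before: case _ = <-stop:
			return
		case e, ok := <-events:
			if !ok {
				return
			}
			fmt.Println("event:", e)
		}
	}
}

func main() {
	_ = openMountsFile("/proc/self/mounts")
	_ = checkPortalCount(map[string]int{"10.0.0.1:3260": 2})
	fmt.Println(isOperationNotPermitted(errors.New("Operation not permitted")))

	// Unbuffered channels omit the explicit 0 capacity (S1019).
	stop := make(chan struct{})  // before: make(chan struct{}, 0)
	events := make(chan string)  // before: make(chan string, 0)
	close(events)
	drainUntilStopped(stop, events)
}

None of these changes alter behaviour; they only bring the package in line with the project's static-check configuration.
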
@@ -258,16 +258,16 @@ func TestFindSlaveDevicesOnMultipath(t *testing.T) {
 func TestGetISCSIPortalHostMapForTarget(t *testing.T) {
 	mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
 	portalHostMap, err := mockDeviceUtil.GetISCSIPortalHostMapForTarget("target1")
-	if nil != err {
+	if err != nil {
 		t.Fatalf("error getting scsi hosts for target: %v", err)
 	}
-	if nil == portalHostMap {
+	if portalHostMap == nil {
 		t.Fatal("no portal host map returned")
 	}
-	if 1 != len(portalHostMap) {
+	if len(portalHostMap) != 1 {
 		t.Fatalf("wrong number of map entries in portal host map: %d", len(portalHostMap))
 	}
-	if 2 != portalHostMap["10.0.0.1:3260"] {
+	if portalHostMap["10.0.0.1:3260"] != 2 {
 		t.Fatalf("incorrect entry in portal host map: %v", portalHostMap)
 	}
 }
@@ -275,16 +275,16 @@ func TestGetISCSIPortalHostMapForTarget(t *testing.T) {
 func TestFindDevicesForISCSILun(t *testing.T) {
 	mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
 	devices, err := mockDeviceUtil.FindDevicesForISCSILun("target1", 1)
-	if nil != err {
+	if err != nil {
 		t.Fatalf("error getting devices for lun: %v", err)
 	}
-	if nil == devices {
+	if devices == nil {
 		t.Fatal("no devices returned")
 	}
-	if 1 != len(devices) {
+	if len(devices) != 1 {
 		t.Fatalf("wrong number of devices: %d", len(devices))
 	}
-	if "sda" != devices[0] {
+	if devices[0] != "sda" {
 		t.Fatalf("incorrect device %v", devices)
 	}
 }

@@ -116,7 +116,7 @@ func getXFSQuotaCmd() (string, error) {
 		}
 	}
 	quotaCmdInitialized = true
-	return "", fmt.Errorf("No xfs_quota program found")
+	return "", fmt.Errorf("no xfs_quota program found")
 }
 
 func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string, error) {
@@ -145,7 +145,7 @@ func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string
 func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
 	tmpMounts, err := ioutil.TempFile("", "mounts")
 	if err != nil {
-		return "", fmt.Errorf("Cannot create temporary mount file: %v", err)
+		return "", fmt.Errorf("cannot create temporary mount file: %v", err)
 	}
 	tmpMountsFileName := tmpMounts.Name()
 	defer tmpMounts.Close()
@@ -153,7 +153,7 @@ func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
 
 	mounts, err := os.Open(MountsFile)
 	if err != nil {
-		return "", fmt.Errorf("Cannot open mounts file %s: %v", MountsFile, err)
+		return "", fmt.Errorf("cannot open mounts file %s: %v", MountsFile, err)
 	}
 	defer mounts.Close()
 
@@ -164,16 +164,16 @@ func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
 			mount := match[2]
 			if mount == mountpoint {
 				if _, err := tmpMounts.WriteString(fmt.Sprintf("%s\n", scanner.Text())); err != nil {
-					return "", fmt.Errorf("Cannot write temporary mounts file: %v", err)
+					return "", fmt.Errorf("cannot write temporary mounts file: %v", err)
 				}
 				if err := tmpMounts.Sync(); err != nil {
-					return "", fmt.Errorf("Cannot sync temporary mounts file: %v", err)
+					return "", fmt.Errorf("cannot sync temporary mounts file: %v", err)
 				}
 				return doRunXFSQuotaCommand(mountpoint, tmpMountsFileName, command)
 			}
 		}
 	}
-	return "", fmt.Errorf("Cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
+	return "", fmt.Errorf("cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
 }
 
 // SupportsQuotas determines whether the filesystem supports quotas.
@@ -215,14 +215,14 @@ func (v linuxVolumeQuotaApplier) GetQuotaOnDir(path string) (QuotaID, error) {
 	}
 	match := lsattrParseRegexp.FindStringSubmatch(string(data))
 	if match == nil {
-		return BadQuotaID, fmt.Errorf("Unable to parse lsattr -pd %s output %s", path, string(data))
+		return BadQuotaID, fmt.Errorf("unable to parse lsattr -pd %s output %s", path, string(data))
 	}
 	if match[2] != path {
-		return BadQuotaID, fmt.Errorf("Mismatch between supplied and returned path (%s != %s)", path, match[2])
+		return BadQuotaID, fmt.Errorf("mismatch between supplied and returned path (%s != %s)", path, match[2])
 	}
 	projid, err := strconv.ParseInt(match[1], 10, 32)
 	if err != nil {
-		return BadQuotaID, fmt.Errorf("Unable to parse project ID from %s (%v)", match[1], err)
+		return BadQuotaID, fmt.Errorf("unable to parse project ID from %s (%v)", match[1], err)
 	}
 	return QuotaID(projid), nil
 }
@@ -244,18 +244,18 @@ func (v linuxVolumeQuotaApplier) SetQuotaOnDir(path string, id QuotaID, bytes in
 func getQuantity(mountpoint string, id QuotaID, xfsQuotaArg string, multiplier int64, allowEmptyOutput bool) (int64, error) {
 	data, err := runXFSQuotaCommand(mountpoint, fmt.Sprintf("quota -p -N -n -v %s %v", xfsQuotaArg, id))
 	if err != nil {
-		return 0, fmt.Errorf("Unable to run xfs_quota: %v", err)
+		return 0, fmt.Errorf("unable to run xfs_quota: %v", err)
 	}
 	if data == "" && allowEmptyOutput {
 		return 0, nil
 	}
 	match := quotaParseRegexp.FindStringSubmatch(data)
 	if match == nil {
-		return 0, fmt.Errorf("Unable to parse quota output '%s'", data)
+		return 0, fmt.Errorf("unable to parse quota output '%s'", data)
 	}
 	size, err := strconv.ParseInt(match[1], 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("Unable to parse data size '%s' from '%s': %v", match[1], data, err)
+		return 0, fmt.Errorf("unable to parse data size '%s' from '%s': %v", match[1], data, err)
 	}
 	klog.V(4).Infof("getQuantity %s %d %s %d => %d %v", mountpoint, id, xfsQuotaArg, multiplier, size, err)
 	return size * multiplier, nil

@@ -179,7 +179,7 @@ func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.Quot
 			}
 		}
 	}
-	return common.BadQuotaID, fmt.Errorf("Cannot find available quota ID")
+	return common.BadQuotaID, fmt.Errorf("cannot find available quota ID")
 }
 
 func addDirToProject(path string, id common.QuotaID, list *projectsList) (common.QuotaID, bool, error) {
@@ -187,7 +187,7 @@ func addDirToProject(path string, id common.QuotaID, list *projectsList) (common
 	for _, project := range list.projects {
 		if project.data == path {
 			if id != project.id {
-				return common.BadQuotaID, false, fmt.Errorf("Attempt to reassign project ID for %s", path)
+				return common.BadQuotaID, false, fmt.Errorf("attempt to reassign project ID for %s", path)
 			}
 			// Trying to reassign a directory to the project it's
 			// already in. Maybe this should be an error, but for
@@ -223,16 +223,16 @@ func addDirToProject(path string, id common.QuotaID, list *projectsList) (common
 
 func removeDirFromProject(path string, id common.QuotaID, list *projectsList) (bool, error) {
 	if id == common.BadQuotaID {
-		return false, fmt.Errorf("Attempt to remove invalid quota ID from %s", path)
+		return false, fmt.Errorf("attempt to remove invalid quota ID from %s", path)
 	}
 	foundAt := -1
 	countByID := make(map[common.QuotaID]int)
 	for i, project := range list.projects {
 		if project.data == path {
 			if id != project.id {
-				return false, fmt.Errorf("Attempting to remove quota ID %v from path %s, but expecting ID %v", id, path, project.id)
+				return false, fmt.Errorf("attempting to remove quota ID %v from path %s, but expecting ID %v", id, path, project.id)
 			} else if foundAt != -1 {
-				return false, fmt.Errorf("Found multiple quota IDs for path %s", path)
+				return false, fmt.Errorf("found multiple quota IDs for path %s", path)
 			}
 			// Faster and easier than deleting an element
 			list.projects[i].isValid = false
@@ -241,7 +241,7 @@ func removeDirFromProject(path string, id common.QuotaID, list *projectsList) (b
 		countByID[project.id]++
 	}
 	if foundAt == -1 {
-		return false, fmt.Errorf("Cannot find quota associated with path %s", path)
+		return false, fmt.Errorf("cannot find quota associated with path %s", path)
 	}
 	if countByID[id] <= 1 {
 		// Removing the last entry means that we're no longer using
@@ -314,7 +314,7 @@ func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, l
 		}
 		os.Remove(tmpProjects)
 	}
-	return fmt.Errorf("Unable to write project files: %v", err)
+	return fmt.Errorf("unable to write project files: %v", err)
 }
 
 func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {

@@ -170,7 +170,7 @@ func getFSInfo(m mount.Interface, path string) (string, string, error) {
 	if !okMountpoint {
 		mountpoint, err = detectMountpoint(m, path)
 		if err != nil {
-			return "", "", fmt.Errorf("Cannot determine mountpoint for %s: %v", path, err)
+			return "", "", fmt.Errorf("cannot determine mountpoint for %s: %v", path, err)
 		}
 	}
 
@@ -178,7 +178,7 @@ func getFSInfo(m mount.Interface, path string) (string, string, error) {
 	if !okBackingDev {
 		backingDev, err = detectBackingDev(m, mountpoint)
 		if err != nil {
-			return "", "", fmt.Errorf("Cannot determine backing device for %s: %v", path, err)
+			return "", "", fmt.Errorf("cannot determine backing device for %s: %v", path, err)
 		}
 	}
 	mountpointMap[path] = mountpoint
@@ -306,11 +306,11 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
 //lint:ignore SA4009 poduid is overwritten by design, see comment below
 func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error {
 	if bytes == nil {
-		return fmt.Errorf("Attempting to assign null quota to %s", path)
+		return fmt.Errorf("attempting to assign null quota to %s", path)
 	}
 	ibytes := bytes.Value()
 	if ok, err := SupportsQuotas(m, path); !ok {
-		return fmt.Errorf("Quotas not supported on %s: %v", path, err)
+		return fmt.Errorf("quotas not supported on %s: %v", path, err)
 	}
 	quotaLock.Lock()
 	defer quotaLock.Unlock()
@@ -321,12 +321,12 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 	// one quota per volume, we can rip all of the pod code out.
 	poduid = types.UID(uuid.NewUUID())
 	if pod, ok := dirPodMap[path]; ok && pod != poduid {
-		return fmt.Errorf("Requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
+		return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
 	}
 	oid, ok := podQuotaMap[poduid]
 	if ok {
 		if quotaSizeMap[oid] != ibytes {
-			return fmt.Errorf("Requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
+			return fmt.Errorf("requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
 		}
 	} else {
 		oid = common.BadQuotaID
@@ -334,7 +334,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 	id, err := createProjectID(path, oid)
 	if err == nil {
 		if oid != common.BadQuotaID && oid != id {
-			return fmt.Errorf("Attempt to reassign quota %v to %v", oid, id)
+			return fmt.Errorf("attempt to reassign quota %v to %v", oid, id)
 		}
 		// When enforcing quotas are enabled, we'll condition this
 		// on their being disabled also.
@@ -353,7 +353,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 		}
 		removeProjectID(path, id)
 	}
-	return fmt.Errorf("Assign quota FAILED %v", err)
+	return fmt.Errorf("assign quota FAILED %v", err)
 }
 
 // GetConsumption -- retrieve the consumption (in bytes) of the directory
@@ -396,7 +396,7 @@ func GetInodes(path string) (*resource.Quantity, error) {
 func ClearQuota(m mount.Interface, path string) error {
 	klog.V(3).Infof("ClearQuota %s", path)
 	if !enabledQuotasForMonitoring() {
-		return fmt.Errorf("ClearQuota called, but quotas disabled")
+		return fmt.Errorf("clearQuota called, but quotas disabled")
 	}
 	quotaLock.Lock()
 	defer quotaLock.Unlock()
@@ -413,7 +413,7 @@ func ClearQuota(m mount.Interface, path string) error {
 	}
 	_, ok = podQuotaMap[poduid]
 	if !ok {
-		return fmt.Errorf("ClearQuota: No quota available for %s", path)
+		return fmt.Errorf("clearQuota: No quota available for %s", path)
 	}
 	projid, err := getQuotaOnDir(m, path)
 	if err != nil {
@@ -422,7 +422,7 @@ func ClearQuota(m mount.Interface, path string) error {
 		klog.V(3).Infof("Attempt to check quota ID %v on dir %s failed: %v", dirQuotaMap[path], path, err)
 	}
 	if projid != dirQuotaMap[path] {
-		return fmt.Errorf("Expected quota ID %v on dir %s does not match actual %v", dirQuotaMap[path], path, projid)
+		return fmt.Errorf("expected quota ID %v on dir %s does not match actual %v", dirQuotaMap[path], path, projid)
 	}
 	count, ok := podDirCountMap[poduid]
 	if count <= 1 || !ok {
@@ -446,7 +446,7 @@ func ClearQuota(m mount.Interface, path string) error {
 	delete(supportsQuotasMap, path)
 	clearApplier(path)
 	if err != nil {
-		return fmt.Errorf("Unable to clear quota for %s: %v", path, err)
+		return fmt.Errorf("unable to clear quota for %s: %v", path, err)
 	}
 	return nil
 }

@@ -304,10 +304,7 @@ func TestGetFileType(t *testing.T) {
 }
 
 func isOperationNotPermittedError(err error) bool {
-	if strings.Contains(err.Error(), "Operation not permitted") {
-		return true
-	}
-	return false
+	return strings.Contains(err.Error(), "Operation not permitted")
 }
 
 func writeFile(content string) (string, string, error) {

@@ -48,7 +48,7 @@ func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
 		for _, myMountPoint := range myMountPoints {
 			if strings.HasPrefix(myMountPoint, parentPrefix) {
 				// Don't let a container trick us into creating directories outside of its rootfs
-				return fmt.Errorf("Invalid container mount point %v", myMountPoint)
+				return fmt.Errorf("invalid container mount point %v", myMountPoint)
 			}
 			myMPSlash := myMountPoint + string(os.PathSeparator)
 			// The previously found nested mountpoint (or "" if none found yet)
@@ -75,10 +75,7 @@ func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
 	var retErr error
 	podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
 		retErr = checkContainer(c)
-		if retErr != nil {
-			return false
-		}
-		return true
+		return retErr == nil
 	})
 	if retErr != nil {
 		return nil, retErr
@@ -96,7 +93,7 @@ func MakeNestedMountpoints(name, baseDir string, pod v1.Pod) error {
 	for _, dir := range dirs {
 		err := os.MkdirAll(filepath.Join(baseDir, dir), 0755)
 		if err != nil {
-			return fmt.Errorf("Unable to create nested volume mountpoints: %v", err)
+			return fmt.Errorf("unable to create nested volume mountpoints: %v", err)
 		}
 	}
 	return nil

@@ -240,7 +240,7 @@ func (grm *nestedPendingOperations) getOperation(key operationKey) (uint, error)
 		}
 	}
 
-	return 0, fmt.Errorf("Operation %+v not found", key)
+	return 0, fmt.Errorf("operation %+v not found", key)
 }
 
 func (grm *nestedPendingOperations) deleteOperation(key operationKey) {

@@ -118,7 +118,7 @@ func Test_NestedPendingOperations_Positive_SecondOpAfterFirstCompletes(t *testin
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateCallbackFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -150,7 +150,7 @@ func Test_NestedPendingOperations_Positive_SecondOpAfterFirstCompletesWithExpBac
 	// Arrange
 	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateCallbackFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -242,7 +242,7 @@ func Test_NestedPendingOperations_Negative_SecondOpBeforeFirstCompletes(t *testi
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -309,7 +309,7 @@ func Test_NestedPendingOperations_Negative_SecondSubOpBeforeFirstCompletes2(t *t
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
 	operationPodName := volumetypes.UniquePodName("operation-podname")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, operationPodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -334,7 +334,7 @@ func Test_NestedPendingOperations_Negative_SecondSubOpBeforeFirstCompletes(t *te
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
 	operationPodName := volumetypes.UniquePodName("operation-podname")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, operationPodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -358,7 +358,7 @@ func Test_NestedPendingOperations_Negative_SecondOpBeforeFirstCompletesWithExpBa
 	// Arrange
 	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -382,7 +382,7 @@ func Test_NestedPendingOperations_Positive_ThirdOpAfterFirstCompletes(t *testing
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -426,7 +426,7 @@ func Test_NestedPendingOperations_Positive_ThirdOpAfterFirstCompletesWithExpBack
 	// Arrange
 	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -509,7 +509,7 @@ func Test_NestedPendingOperations_Positive_Wait(t *testing.T) {
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err != nil {
@@ -538,7 +538,7 @@ func Test_NestedPendingOperations_Positive_WaitWithExpBackoff(t *testing.T) {
 	// Arrange
 	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
 	volumeName := v1.UniqueVolumeName("volume-name")
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err := grm.Run(volumeName, EmptyUniquePodName, EmptyNodeName, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err != nil {
@@ -710,9 +710,9 @@ func Test_NestedPendingOperations_Positive_Issue_88355(t *testing.T) {
 	)
 
 	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
-	opZContinueCh := make(chan interface{}, 0 /* bufferSize */)
-	op1ContinueCh := make(chan interface{}, 0 /* bufferSize */)
-	op2ContinueCh := make(chan interface{}, 0 /* bufferSize */)
+	opZContinueCh := make(chan interface{})
+	op1ContinueCh := make(chan interface{})
+	op2ContinueCh := make(chan interface{})
 	operationZ := generateWaitFunc(opZContinueCh)
 	operation1 := generateWaitFunc(op1ContinueCh)
 	operation2 := generateWaitWithErrorFunc(op2ContinueCh)
@@ -772,7 +772,7 @@ func testConcurrentOperationsPositive(
 
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName1, podName1, nodeName1 /* nodeName */, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {
@@ -802,7 +802,7 @@ func testConcurrentOperationsNegative(
 
 	// Arrange
 	grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
-	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1DoneCh := make(chan interface{})
 	operation1 := generateWaitFunc(operation1DoneCh)
 	err1 := grm.Run(volumeName1, podName1, nodeName1 /* nodeName */, volumetypes.GeneratedOperations{OperationFunc: operation1})
 	if err1 != nil {

@@ -985,7 +985,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation(
 			return false, fmt.Errorf("mounter was not set for a filesystem volume")
 		}
 		if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil {
-			return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v",
+			return false, fmt.Errorf("could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v",
 				uniqueVolumeName,
 				volumeName,
 				podName,
@@ -1008,7 +1008,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation(
 		var islinkExist bool
 		var checkErr error
 		if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil {
-			return false, fmt.Errorf("Could not check whether the block volume %q (spec.Name: %q) pod %q (UID: %q) is mapped to: %v",
+			return false, fmt.Errorf("could not check whether the block volume %q (spec.Name: %q) pod %q (UID: %q) is mapped to: %v",
 				uniqueVolumeName,
 				volumeName,
 				podName,

@@ -39,7 +39,6 @@ import (
 	kevents "k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	ioutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
 	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
@@ -623,7 +622,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
 		if og.checkNodeCapabilitiesBeforeMount {
 			if canMountErr := volumeMounter.CanMount(); canMountErr != nil {
 				err = fmt.Errorf(
-					"Verify that your node machine has the required components before attempting to mount this volume type. %s",
+					"verify that your node machine has the required components before attempting to mount this volume type. %s",
 					canMountErr)
 				return volumeToMount.GenerateError("MountVolume.CanMount failed", err)
 			}
@@ -631,7 +630,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
 
 		// Execute mount
 		mountErr := volumeMounter.SetUp(volume.MounterArgs{
-			FsUser: ioutil.FsUserFrom(volumeToMount.Pod),
+			FsUser: util.FsUserFrom(volumeToMount.Pod),
 			FsGroup: fsGroup,
 			DesiredSize: volumeToMount.DesiredSizeLimit,
 			FSGroupChangePolicy: fsGroupChangePolicy,
@@ -853,7 +852,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 
 		if err != nil || util.HasMountRefs(deviceMountPath, refs) {
 			if err == nil {
-				err = fmt.Errorf("The device mount path %q is still mounted by other references %v", deviceMountPath, refs)
+				err = fmt.Errorf("the device mount path %q is still mounted by other references %v", deviceMountPath, refs)
 			}
 			return deviceToDetach.GenerateError("GetDeviceMountRefs check failed", err)
 		}
@@ -1028,7 +1027,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
 				devicePath = pluginDevicePath
 			}
 			if len(devicePath) == 0 {
-				return volumeToMount.GenerateError("MapVolume failed", goerrors.New("Device path of the volume is empty"))
+				return volumeToMount.GenerateError("MapVolume failed", goerrors.New("device path of the volume is empty"))
 			}
 		}
 
@@ -1059,7 +1058,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
 
 		// Execute common map
 		volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath()
-		mapErr := ioutil.MapBlockVolume(og.blkUtil, devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID)
+		mapErr := util.MapBlockVolume(og.blkUtil, devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID)
 		if mapErr != nil {
 			// On failure, return error. Caller will log and retry.
 			return volumeToMount.GenerateError("MapVolume.MapBlockVolume failed", mapErr)
@@ -1141,7 +1140,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
 		globalUnmapPath := volumeToUnmount.DeviceMountPath
 
 		// Execute common unmap
-		unmapErr := ioutil.UnmapBlockVolume(og.blkUtil, globalUnmapPath, podDeviceUnmapPath, volName, volumeToUnmount.PodUID)
+		unmapErr := util.UnmapBlockVolume(og.blkUtil, globalUnmapPath, podDeviceUnmapPath, volName, volumeToUnmount.PodUID)
 		if unmapErr != nil {
 			// On failure, return error. Caller will log and retry.
 			return volumeToUnmount.GenerateError("UnmapVolume.UnmapBlockVolume failed", unmapErr)
@@ -1232,7 +1231,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
 			}
 		}
 		if len(refs) > 0 {
-			err = fmt.Errorf("The device %q is still referenced from other Pods %v", globalMapPath, refs)
+			err = fmt.Errorf("the device %q is still referenced from other Pods %v", globalMapPath, refs)
 			return deviceToDetach.GenerateError("UnmapDevice failed", err)
 		}
 
@@ -1337,7 +1336,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
 			// On failure, return error. Caller will log and retry.
 			return volumeToMount.GenerateError(
 				"VerifyControllerAttachedVolume failed",
-				fmt.Errorf("Node object retrieved from API server is nil"))
+				fmt.Errorf("node object retrieved from API server is nil"))
 		}
 
 		for _, attachedVolume := range node.Status.VolumesAttached {
@@ -1408,11 +1407,11 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
 
 	volumePlugin, err := og.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
 	if err != nil {
-		return volumetypes.GeneratedOperations{}, fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
+		return volumetypes.GeneratedOperations{}, fmt.Errorf("error finding plugin for expanding volume: %q with error %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
 	}
 
 	if volumePlugin == nil {
-		return volumetypes.GeneratedOperations{}, fmt.Errorf("Can not find plugin for expanding volume: %q", util.GetPersistentVolumeClaimQualifiedName(pvc))
+		return volumetypes.GeneratedOperations{}, fmt.Errorf("can not find plugin for expanding volume: %q", util.GetPersistentVolumeClaimQualifiedName(pvc))
 	}
 
 	expandVolumeFunc := func() (error, error) {
@@ -1437,7 +1436,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
 			// until they reflect user requested size in pvc.Status.Size
 			updateErr := util.UpdatePVSize(pv, newSize, og.kubeClient)
 			if updateErr != nil {
-				detailedErr := fmt.Errorf("Error updating PV spec capacity for volume %q with : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), updateErr)
+				detailedErr := fmt.Errorf("error updating PV spec capacity for volume %q with : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), updateErr)
 				return detailedErr, detailedErr
 			}
 
@@ -1452,7 +1451,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
 			klog.V(4).Infof("Controller resizing done for PVC %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
 			err := util.MarkResizeFinished(pvc, newSize, og.kubeClient)
 			if err != nil {
-				detailedErr := fmt.Errorf("Error marking pvc %s as resized : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
+				detailedErr := fmt.Errorf("error marking pvc %s as resized : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
 				return detailedErr, detailedErr
 			}
 			successMsg := fmt.Sprintf("ExpandVolume succeeded for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
@@ -1460,7 +1459,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
 		} else {
 			err := util.MarkForFSResize(pvc, og.kubeClient)
 			if err != nil {
-				detailedErr := fmt.Errorf("Error updating pvc %s condition for fs resize : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
+				detailedErr := fmt.Errorf("error updating pvc %s condition for fs resize : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
 				klog.Warning(detailedErr)
 				return nil, nil
 			}
@@ -1630,7 +1629,7 @@ func (og *operationGenerator) nodeExpandVolume(
 		pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{})
 		if err != nil {
 			// Return error rather than leave the file system un-resized, caller will log and retry
-			return false, fmt.Errorf("MountVolume.NodeExpandVolume get PVC failed : %v", err)
+			return false, fmt.Errorf("mountVolume.NodeExpandVolume get PVC failed : %v", err)
 		}
 
 		pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage]
@@ -1675,7 +1674,7 @@ func (og *operationGenerator) nodeExpandVolume(
 			err = util.MarkFSResizeFinished(pvc, pvSpecCap, og.kubeClient)
 			if err != nil {
 				// On retry, NodeExpandVolume will be called again but do nothing
-				return false, fmt.Errorf("MountVolume.NodeExpandVolume update PVC status failed : %v", err)
+				return false, fmt.Errorf("mountVolume.NodeExpandVolume update PVC status failed : %v", err)
 			}
 			return true, nil
 		}
@@ -1687,7 +1686,7 @@ func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount
 	mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec)
 
 	if len(mountOptions) > 0 && !plugin.SupportsMountOption() {
-		return fmt.Errorf("Mount options are not supported for this volume type")
+		return fmt.Errorf("mount options are not supported for this volume type")
 	}
 	return nil
 }

@@ -234,7 +234,7 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
 		defer wg.Done()
 		for {
 			select {
-			case _ = <-stopChannel:
+			case <-stopChannel:
 				return
 			case eventEvent, ok := <-eventWatch.ResultChan():
 				if !ok {

@@ -220,7 +220,7 @@ func (c *mockRecyclerClient) DeletePod(name, namespace string) error {
 }
 
 func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
-	eventCh := make(chan watch.Event, 0)
+	eventCh := make(chan watch.Event)
 	go func() {
 		for _, e := range c.events {
 			eventCh <- e

@@ -557,11 +557,11 @@ func doSafeOpen(pathname string, base string) (int, error) {
 		var deviceStat unix.Stat_t
 		err := unix.Fstat(childFD, &deviceStat)
 		if err != nil {
-			return -1, fmt.Errorf("Error running fstat on %s with %v", currentPath, err)
+			return -1, fmt.Errorf("error running fstat on %s with %v", currentPath, err)
 		}
 		fileFmt := deviceStat.Mode & syscall.S_IFMT
 		if fileFmt == syscall.S_IFLNK {
-			return -1, fmt.Errorf("Unexpected symlink found %s", currentPath)
+			return -1, fmt.Errorf("unexpected symlink found %s", currentPath)
 		}
 
 		// Close parentFD

@@ -114,7 +114,7 @@ func SetReady(dir string) {
 func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
 	secret := make(map[string]string)
 	if kubeClient == nil {
-		return secret, fmt.Errorf("Cannot get kube client")
+		return secret, fmt.Errorf("cannot get kube client")
 	}
 	secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
 	if err != nil {
@@ -137,7 +137,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
 		return secret, err
 	}
 	if secrets.Type != v1.SecretType(volumePluginName) {
-		return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName)
+		return secret, fmt.Errorf("cannot get secret of type %s", volumePluginName)
 	}
 	for name, data := range secrets.Data {
 		secret[name] = string(data)
@@ -148,11 +148,11 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
 // GetClassForVolume locates storage class by persistent volume
 func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
 	if kubeClient == nil {
-		return nil, fmt.Errorf("Cannot get kube client")
+		return nil, fmt.Errorf("cannot get kube client")
 	}
 	className := v1helper.GetPersistentVolumeClass(pv)
 	if className == "" {
-		return nil, fmt.Errorf("Volume has no storage class")
+		return nil, fmt.Errorf("volume has no storage class")
 	}
 
 	class, err := kubeClient.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{})
@@ -177,7 +177,7 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri
 		terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms
 		klog.V(10).Infof("Match for Required node selector terms %+v", terms)
 		if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) {
-			return fmt.Errorf("No matching NodeSelectorTerms")
+			return fmt.Errorf("no matching NodeSelectorTerms")
 		}
 	}
 
@@ -241,7 +241,7 @@ func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
 func GetPath(mounter volume.Mounter) (string, error) {
 	path := mounter.GetPath()
 	if path == "" {
-		return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
+		return "", fmt.Errorf("path is empty %s", reflect.TypeOf(mounter).String())
 	}
 	return path, nil
 }