From b225d6c7ac42fce1ab46acc1e99a592438b512f6 Mon Sep 17 00:00:00 2001
From: Alexandru Matei
Date: Mon, 23 Jan 2023 14:55:35 +0200
Subject: [PATCH] kubelet: Fix fs quota monitoring on volumes

File system quota monitoring setup fails on subsequent invocations:
each time quota setup is invoked, a new random UID is generated for
the pod and compared with the previously stored UID for the directory.

Fix it by keeping track of the mapping between the internal UID
generated for a pod and the actual external pod UID.

Signed-off-by: Alexandru Matei
---
 pkg/volume/emptydir/empty_dir.go       |  6 ------
 pkg/volume/util/fsquota/project.go     |  4 ++--
 pkg/volume/util/fsquota/quota_linux.go | 37 +++++++++++++++++++-------
 3 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/pkg/volume/emptydir/empty_dir.go b/pkg/volume/emptydir/empty_dir.go
index 9e5c0e4b33a..9ad981c54bd 100644
--- a/pkg/volume/emptydir/empty_dir.go
+++ b/pkg/volume/emptydir/empty_dir.go
@@ -301,12 +301,6 @@ func (ed *emptyDir) assignQuota(dir string, mounterSize *resource.Quantity) erro
     if err != nil {
         klog.V(3).Infof("Unable to check for quota support on %s: %s", dir, err.Error())
     } else if hasQuotas {
-        _, err := fsquota.GetQuotaOnDir(ed.mounter, dir)
-        if err != nil {
-            klog.V(4).Infof("Attempt to check quota on dir %s failed: %v", dir, err)
-            // this is not a fatal error so return nil
-            return nil
-        }
         klog.V(4).Infof("emptydir trying to assign quota %v on %s", mounterSize, dir)
         if err := fsquota.AssignQuota(ed.mounter, dir, ed.pod.UID, mounterSize); err != nil {
             klog.V(3).Infof("Set quota on %s failed %s", dir, err.Error())
diff --git a/pkg/volume/util/fsquota/project.go b/pkg/volume/util/fsquota/project.go
index f57454b076d..8ebc0068740 100644
--- a/pkg/volume/util/fsquota/project.go
+++ b/pkg/volume/util/fsquota/project.go
@@ -190,13 +190,13 @@ func addDirToProject(path string, id common.QuotaID, list *projectsList) (common
     idMap := make(map[common.QuotaID]bool)
     for _, project := range list.projects {
         if project.data == path {
-            if id != project.id {
+            if id != common.BadQuotaID && id != project.id {
                 return common.BadQuotaID, false, fmt.Errorf("attempt to reassign project ID for %s", path)
             }
             // Trying to reassign a directory to the project it's
             // already in. Maybe this should be an error, but for
             // now treat it as an idempotent operation
-            return id, false, nil
+            return project.id, false, nil
         }
         idMap[project.id] = true
     }
diff --git a/pkg/volume/util/fsquota/quota_linux.go b/pkg/volume/util/fsquota/quota_linux.go
index e7ba29b1b08..240cc356ee8 100644
--- a/pkg/volume/util/fsquota/quota_linux.go
+++ b/pkg/volume/util/fsquota/quota_linux.go
@@ -35,6 +35,9 @@ import (
     "k8s.io/kubernetes/pkg/volume/util/fsquota/common"
 )
 
+// Pod -> External Pod UID
+var podUidMap = make(map[types.UID]types.UID)
+
 // Pod -> ID
 var podQuotaMap = make(map[types.UID]common.QuotaID)
 
@@ -314,20 +317,32 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
     }
     quotaLock.Lock()
     defer quotaLock.Unlock()
-    // Current policy is to set individual quotas on each volumes.
+    // Current policy is to set individual quotas on each volume,
+    // for each new volume we generate a random UUID and we use that as
+    // the internal pod uid.
+    // From fsquota point of view each volume is attached to a
+    // single unique pod.
     // If we decide later that we want to assign one quota for all
-    // volumes in a pod, we can simply remove this line of code.
+    // volumes in a pod, we can simply use poduid parameter directly
     // If and when we decide permanently that we're going to adopt
     // one quota per volume, we can rip all of the pod code out.
-    volumeuid := types.UID(uuid.NewUUID())
-    if pod, ok := dirPodMap[path]; ok && pod != volumeuid {
-        return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, pod, volumeuid)
+    externalPodUid := poduid
+    internalPodUid, ok := dirPodMap[path]
+    if ok {
+        if podUidMap[internalPodUid] != externalPodUid {
+            return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, podUidMap[internalPodUid], externalPodUid)
+        }
+    } else {
+        internalPodUid = types.UID(uuid.NewUUID())
     }
-    oid, ok := podQuotaMap[volumeuid]
+    oid, ok := podQuotaMap[internalPodUid]
     if ok {
         if quotaSizeMap[oid] != ibytes {
             return fmt.Errorf("requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
         }
+        if _, ok := dirPodMap[path]; ok {
+            return nil
+        }
     } else {
         oid = common.BadQuotaID
     }
@@ -342,12 +357,13 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
         ibytes = -1
     }
     if err = setQuotaOnDir(path, id, ibytes); err == nil {
-        quotaPodMap[id] = volumeuid
+        quotaPodMap[id] = internalPodUid
         quotaSizeMap[id] = ibytes
-        podQuotaMap[volumeuid] = id
+        podQuotaMap[internalPodUid] = id
         dirQuotaMap[path] = id
-        dirPodMap[path] = volumeuid
-        podDirCountMap[volumeuid]++
+        dirPodMap[path] = internalPodUid
+        podUidMap[internalPodUid] = externalPodUid
+        podDirCountMap[internalPodUid]++
         klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
         return nil
     }
@@ -436,6 +452,7 @@ func ClearQuota(m mount.Interface, path string) error {
         delete(quotaPodMap, podQuotaMap[poduid])
         delete(podDirCountMap, poduid)
         delete(podQuotaMap, poduid)
+        delete(podUidMap, poduid)
     } else {
         err = removeProjectID(path, projid)
         podDirCountMap[poduid]--
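
Note (not part of the patch): the sketch below restates the core idea of the fix in isolation. A directory keeps the internal ID it received on first assignment, and later calls are checked against the stored external pod UID instead of against a freshly generated random UUID, so re-running quota setup for the same pod is idempotent while a different pod is still rejected. The quotaTracker type, its methods, and the example paths are illustrative names only, assuming plain string UIDs rather than the kubelet's types.UID and package-level maps.

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
)

// quotaTracker is a hypothetical, stripped-down stand-in for the package-level
// maps in quota_linux.go: it remembers which internal ID a directory received
// and which external pod UID that internal ID belongs to.
type quotaTracker struct {
    dirToInternal map[string]string // directory path -> internal ID
    internalToPod map[string]string // internal ID -> external pod UID
}

func newQuotaTracker() *quotaTracker {
    return &quotaTracker{
        dirToInternal: make(map[string]string),
        internalToPod: make(map[string]string),
    }
}

// newInternalID mimics generating a random per-volume UID.
func newInternalID() string {
    buf := make([]byte, 16)
    if _, err := rand.Read(buf); err != nil {
        panic(err)
    }
    return hex.EncodeToString(buf)
}

// assign returns the internal ID for dir. A fresh ID is generated only the
// first time a directory is seen; on later calls the caller's pod UID is
// compared with the UID stored for that internal ID, so repeated quota setup
// for the same pod succeeds instead of failing on a mismatched random UUID.
func (t *quotaTracker) assign(dir, podUID string) (string, error) {
    if internal, ok := t.dirToInternal[dir]; ok {
        if t.internalToPod[internal] != podUID {
            return "", fmt.Errorf("directory %s already assigned to pod %s, not %s",
                dir, t.internalToPod[internal], podUID)
        }
        return internal, nil // idempotent re-invocation
    }
    internal := newInternalID()
    t.dirToInternal[dir] = internal
    t.internalToPod[internal] = podUID
    return internal, nil
}

func main() {
    t := newQuotaTracker()
    first, _ := t.assign("/var/lib/kubelet/pods/abc/volumes/empty-dir", "pod-abc")
    second, _ := t.assign("/var/lib/kubelet/pods/abc/volumes/empty-dir", "pod-abc")
    fmt.Println(first == second) // true: the second invocation reuses the stored ID
    if _, err := t.assign("/var/lib/kubelet/pods/abc/volumes/empty-dir", "pod-xyz"); err != nil {
        fmt.Println(err) // a different pod for the same directory is still an error
    }
}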