Use xfs_quota command to apply quotas
This commit is contained in:
parent 448e0c44c6
commit f8661d6240
@@ -446,7 +446,7 @@ const (
	//
	// Allow use of filesystems for ephemeral storage monitoring.
	// Only applies if LocalStorageCapacityIsolation is set.
	FSQuotaForLSCIMonitoring = "FSQuotaForLSCIMonitoring"
	LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
)

func init() {
@@ -521,7 +521,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
	TTLAfterFinished: {Default: false, PreRelease: featuregate.Alpha},
	KubeletPodResources: {Default: false, PreRelease: featuregate.Alpha},
	WindowsGMSA: {Default: false, PreRelease: featuregate.Alpha},
	FSQuotaForLSCIMonitoring: {Default: false, PreRelease: utilfeature.Alpha},
	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},

	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
	// unintentionally on either side:
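For context, a minimal sketch (package and function names are illustrative) of how callers consult the renamed gate; the same Enabled() check appears in enabledQuotasForMonitoring() further down in this commit.

package quotaexample

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// quotaMonitoringEnabled reports whether quota-based ephemeral-storage
// monitoring is turned on under the gate's new name.
func quotaMonitoringEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}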
@@ -62,6 +62,7 @@ go_library(
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -32,6 +32,7 @@ import (
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	schedulerutils "k8s.io/kubernetes/pkg/scheduler/util"
	volumeutils "k8s.io/kubernetes/pkg/volume/util"
)

const (
@@ -395,27 +396,11 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt
	}, nil
}

func internalIsLocalEphemeralVolume(pod *v1.Pod, volume v1.Volume) bool {
	return volume.GitRepo != nil ||
		(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
		volume.ConfigMap != nil || volume.DownwardAPI != nil
}

// IsLocalEphemeralVolume determines whether a given volume name is ephemeral
func IsLocalEphemeralVolume(pod *v1.Pod, volumeName string) (bool, error) {
	for _, volume := range pod.Spec.Volumes {
		if volume.Name == volumeName {
			return internalIsLocalEphemeralVolume(pod, volume), nil
		}
	}
	return false, fmt.Errorf("Volume %s not found in pod %v", volumeName, pod)
}

// localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local
func localEphemeralVolumeNames(pod *v1.Pod) []string {
	result := []string{}
	for _, volume := range pod.Spec.Volumes {
		if internalIsLocalEphemeralVolume(pod, volume) {
		if volumeutils.IsLocalEphemeralVolume(volume) {
			result = append(result, volume.Name)
		}
	}
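The eviction-local helpers above are replaced by a shared helper in pkg/volume/util. A small sketch of the classification that helper is assumed to perform, restated from the removed internalIsLocalEphemeralVolume (package and function names here are illustrative):

package volumeexample

import v1 "k8s.io/api/core/v1"

// isLocalEphemeral restates the removed classification: gitRepo volumes,
// non-memory emptyDir volumes, configMaps, and downward-API volumes count
// as local ephemeral storage.
func isLocalEphemeral(volume v1.Volume) bool {
	return volume.GitRepo != nil ||
		(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
		volume.ConfigMap != nil || volume.DownwardAPI != nil
}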
pkg/kubelet/volumemanager/cache/BUILD (vendored, 2 changes)
@@ -16,12 +16,12 @@ go_library(
    deps = [
        "//pkg/api/v1/resource:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/eviction:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
@@ -25,8 +25,8 @@ import (
	"sync"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	apiv1resource "k8s.io/kubernetes/pkg/api/v1/resource"
	limits "k8s.io/kubernetes/pkg/kubelet/eviction"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -165,7 +165,7 @@ type volumeToMount struct {

	// desiredSizeLimit indicates the desired upper bound on the size of the volume
	// (if so implemented)
	desiredSizeLimit int64
	desiredSizeLimit *resource.Quantity
}

// The pod object represents a pod that references the underlying volume and
@@ -232,15 +232,12 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
	}

	if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
		var sizeLimit int64
		sizeLimit = 0
		isLocal, _ := limits.IsLocalEphemeralVolume(pod, volumeSpec.Name())
		if isLocal {
			_, podLimits := apiv1resource.PodRequestsAndLimits(pod)
			ephemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
			sizeLimit = ephemeralStorageLimit.Value()
			if sizeLimit == 0 {
				sizeLimit = -1
		var sizeLimit *resource.Quantity
		if volumeSpec.Volume != nil {
			if util.IsLocalEphemeralVolume(*volumeSpec.Volume) {
				_, podLimits := apiv1resource.PodRequestsAndLimits(pod)
				ephemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
				sizeLimit = resource.NewQuantity(ephemeralStorageLimit.Value(), resource.BinarySI)
			}
		}
		dsw.volumesToMount[volumeName] = volumeToMount{
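A self-contained sketch of the pattern the new AddPodToVolume code uses to express the pod-level ephemeral-storage limit as a *resource.Quantity (the helper name is hypothetical):

package sizelimitexample

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ephemeralSizeLimit looks up the pod-level ephemeral-storage limit and
// expresses it as a Quantity, mirroring the assignment above.
// If no pod-level limit was set, this yields a zero-valued Quantity.
func ephemeralSizeLimit(podLimits v1.ResourceList) *resource.Quantity {
	limit := podLimits[v1.ResourceEphemeralStorage]
	return resource.NewQuantity(limit.Value(), resource.BinarySI)
}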
@@ -236,17 +236,20 @@ func (ed *emptyDir) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	if err == nil {
		volumeutil.SetReady(ed.getMetaDir())
	}
	if mounterArgs.DesiredSize != 0 {
		if hasQuotas, _ := quota.SupportsQuotas(ed.mounter, dir); hasQuotas {
			klog.V(3).Infof("emptydir trying to assign quota")
	if mounterArgs.DesiredSize != nil {
		hasQuotas, err := quota.SupportsQuotas(ed.mounter, dir)
		if err != nil {
			return fmt.Errorf("Unable to check for quota support on %s: %s", dir, err.Error())
		}
		if hasQuotas {
			klog.V(4).Infof("emptydir trying to assign quota %v on %s", mounterArgs.DesiredSize, dir)
			err := quota.AssignQuota(ed.mounter, dir, mounterArgs.PodUID, mounterArgs.DesiredSize)
			if err != nil {
				klog.V(3).Infof("Set quota failed %v", err)
				return fmt.Errorf("Set quota on %s failed %s", dir, err.Error())
			}
		}
	}

	return err
	return nil
}

// setupTmpfs creates a tmpfs mount at the specified directory.
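Condensed for reference, the quota path taken by SetUpAt above: only attempt quota assignment when a desired size is known and the underlying filesystem supports project quotas, otherwise fall back to the existing monitoring. A sketch assuming the packages this commit imports (the helper name is illustrative):

package emptydirexample

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// maybeAssignQuota condenses the SetUpAt logic: no desired size means no
// quota; an unsupported filesystem means falling back to du/find monitoring.
func maybeAssignQuota(m mount.Interface, dir, podUID string, desired *resource.Quantity) error {
	if desired == nil {
		return nil // no size limit requested
	}
	hasQuotas, err := quota.SupportsQuotas(m, dir)
	if err != nil {
		return fmt.Errorf("unable to check for quota support on %s: %v", dir, err)
	}
	if !hasQuotas {
		return nil // fall back to du/find-based monitoring
	}
	return quota.AssignQuota(m, dir, podUID, desired)
}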
@@ -411,7 +414,7 @@ func (ed *emptyDir) teardownDefault(dir string) error {
	// Remove any quota
	err := quota.ClearQuota(ed.mounter, dir)
	if err != nil {
		klog.V(3).Infof("Failed to clear quota on %s: %v", dir, err)
		klog.Warningf("Warning: Failed to clear quota on %s: %v", dir, err)
	}
	// Renaming the directory is not required anymore because the operation executor
	// now handles duplicate operations on the same volume
@@ -58,11 +58,13 @@ func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
// DiskUsage gets disk usage of specified path.
func DiskUsage(path string) (*resource.Quantity, error) {
	// First check whether the quota system knows about this directory
	// A nil quantity with no error means that the path does not support quotas
	// and we should use other mechanisms.
	data, err := quota.GetConsumption(path)
	if err == nil {
		var q resource.Quantity
		q.Set(data)
		return &q, nil
	if data != nil {
		return data, nil
	} else if err != nil {
		return nil, fmt.Errorf("unable to retrieve disk consumption via quota for %s: %v", path, err)
	}
	// Uses the same niceness level as cadvisor.fs does when running du
	// Uses -B 1 to always scale to a blocksize of 1 byte
@@ -85,9 +87,13 @@ func Find(path string) (int64, error) {
		return 0, fmt.Errorf("invalid directory")
	}
	// First check whether the quota system knows about this directory
	// A nil quantity with no error means that the path does not support quotas
	// and we should use other mechanisms.
	inodes, err := quota.GetInodes(path)
	if err == nil {
		return inodes, nil
	if inodes != nil {
		return inodes.Value(), nil
	} else if err != nil {
		return 0, fmt.Errorf("unable to retrieve inode consumption via quota for %s: %v", path, err)
	}
	var counter byteCounter
	var stderr bytes.Buffer
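Both DiskUsage and Find above now follow the same lookup order: consult the quota subsystem first, treat a nil quantity with a nil error as "not under quota management", and only then fall back to du/find. A sketch of that pattern; the fallback parameter is an assumption standing in for the existing du-based path:

package fsexample

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// usageViaQuotaOrFallback sketches the lookup order used by DiskUsage above.
func usageViaQuotaOrFallback(path string, fallback func(string) (*resource.Quantity, error)) (*resource.Quantity, error) {
	data, err := quota.GetConsumption(path)
	if data != nil {
		return data, nil // quota accounting answered directly
	}
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve disk consumption via quota for %s: %v", path, err)
	}
	// nil quantity, nil error: path is not under quota management
	return fallback(path)
}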
@ -25,6 +25,7 @@ go_library(
|
||||
"//pkg/volume/util/volumepathhandler:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -349,7 +350,7 @@ type VolumeToMount struct {
|
||||
|
||||
// DesiredSizeLimit indicates the desired upper bound on the size of the volume
|
||||
// (if so implemented)
|
||||
DesiredSizeLimit int64
|
||||
DesiredSizeLimit *resource.Quantity
|
||||
}
|
||||
|
||||
// GenerateMsgDetailed returns detailed msgs for volumes to mount
|
||||
|
@ -13,12 +13,11 @@ go_library(
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/volume/util/quota/common:go_default_library",
|
||||
"//pkg/volume/util/quota/extfs:go_default_library",
|
||||
"//pkg/volume/util/quota/xfs:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
@ -36,7 +35,9 @@ go_test(
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/volume/util/quota/common:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -54,8 +55,6 @@ filegroup(
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/volume/util/quota/common:all-srcs",
|
||||
"//pkg/volume/util/quota/extfs:all-srcs",
|
||||
"//pkg/volume/util/quota/xfs:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
|
@ -6,12 +6,10 @@ go_library(
|
||||
"quota_linux_common.go",
|
||||
"quota_linux_common_impl.go",
|
||||
],
|
||||
cgo = True,
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/util/quota/common",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
|
@@ -18,18 +18,46 @@ limitations under the License.

package common

// QuotaID -- generic quota identifier
import (
	"regexp"
)

// QuotaID is generic quota identifier.
// Data type based on quotactl(2).
type QuotaID int32

const (
	// UnknownQuotaID -- cannot determine whether a quota is in force
	UnknownQuotaID QuotaID = -1
	// BadQuotaID -- Invalid quota
	BadQuotaID QuotaID = 0
)

const (
	acct = iota
	enforcing = iota
)

// QuotaType -- type of quota to be applied
type QuotaType int

const (
	// FSQuotaAccounting for quotas for accounting only
	FSQuotaAccounting QuotaType = 1 << iota
	// FSQuotaEnforcing for quotas for enforcement
	FSQuotaEnforcing QuotaType = 1 << iota
)

// FirstQuota is the quota ID we start with.
// XXXXXXX Need a better way of doing this...
var FirstQuota QuotaID = 1048577

// MountsFile is the location of the system mount data
var MountsFile = "/proc/self/mounts"

// MountParseRegexp parses out /proc/sys/self/mounts
var MountParseRegexp = regexp.MustCompilePOSIX("^([^ ]*)[ \t]*([^ ]*)[ \t]*([^ ]*)") // Ignore options etc.

// LinuxVolumeQuotaProvider returns an appropriate quota applier
// object if we can support quotas on this device
type LinuxVolumeQuotaProvider interface {
@@ -59,7 +87,7 @@ type LinuxVolumeQuotaApplier interface {
	// Return value of false with no error means that the ID is not
	// in use; true means that it is already in use. An error
	// return means that any quota ID will fail.
	QuotaIDIsInUse(path string, id QuotaID) (bool, error)
	QuotaIDIsInUse(id QuotaID) (bool, error)

	// GetConsumption returns the consumption (in bytes) of the
	// directory, determined by the implementation's quota-based
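MountsFile and MountParseRegexp are shared by the mountpoint-scanning code later in this commit. A short, illustrative example of what the regexp extracts from one /proc/self/mounts line (the sample line is made up):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

func main() {
	line := "/dev/mapper/fedora-home /home ext4 rw,relatime 0 0"
	match := common.MountParseRegexp.FindStringSubmatch(line)
	if match != nil {
		// match[1] = device, match[2] = mountpoint, match[3] = filesystem type,
		// exactly as consumed by detectBackingDevInternal and runXFSQuotaCommand.
		fmt.Println(match[1], match[2], match[3])
	}
}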
@ -18,135 +18,269 @@ limitations under the License.
|
||||
|
||||
package common
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include <dirent.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/quota.h>
|
||||
#include <linux/dqblk_xfs.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef FS_XFLAG_PROJINHERIT
|
||||
struct fsxattr {
|
||||
__u32 fsx_xflags;
|
||||
__u32 fsx_extsize;
|
||||
__u32 fsx_nextents;
|
||||
__u32 fsx_projid;
|
||||
unsigned char fsx_pad[12];
|
||||
};
|
||||
#define FS_XFLAG_PROJINHERIT 0x00000200
|
||||
#endif
|
||||
#ifndef FS_IOC_FSGETXATTR
|
||||
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
|
||||
#endif
|
||||
#ifndef FS_IOC_FSSETXATTR
|
||||
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
|
||||
#endif
|
||||
|
||||
#ifndef PRJQUOTA
|
||||
#define PRJQUOTA 2
|
||||
#endif
|
||||
#ifndef Q_XGETQSTAT_PRJQUOTA
|
||||
#define Q_XGETQSTAT_PRJQUOTA QCMD(Q_XGETQSTAT, PRJQUOTA)
|
||||
#endif
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// IsFilesystemOfType determines whether the filesystem specified is of the type
|
||||
// specified by the magic number
|
||||
func IsFilesystemOfType(mountpoint string, backingDev string, magic int64) bool {
|
||||
var buf syscall.Statfs_t
|
||||
err := syscall.Statfs(mountpoint, &buf)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Extfs Unable to statfs %s: %v", mountpoint, err)
|
||||
return false
|
||||
}
|
||||
if buf.Type != magic {
|
||||
return false
|
||||
}
|
||||
var quotaCmd string
|
||||
var quotaCmdInitialized bool
|
||||
var quotaCmdLock sync.RWMutex
|
||||
|
||||
var qstat C.fs_quota_stat_t
|
||||
CPath := C.CString(backingDev)
|
||||
defer free(CPath)
|
||||
|
||||
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, uintptr(C.Q_XGETQSTAT_PRJQUOTA), uintptr(unsafe.Pointer(CPath)), 0, uintptr(unsafe.Pointer(&qstat)), 0, 0)
|
||||
return errno == 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ENFD > 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ACCT > 0
|
||||
// If we later get a filesystem that uses project quota semantics other than
|
||||
// XFS, we'll need to change this.
|
||||
// Higher levels don't need to know what's inside
|
||||
type linuxFilesystemType struct {
|
||||
name string
|
||||
typeMagic int64 // Filesystem magic number, per statfs(2)
|
||||
maxQuota int64
|
||||
allowEmptyOutput bool // Accept empty output from "quota" command
|
||||
}
|
||||
|
||||
func free(p *C.char) {
|
||||
C.free(unsafe.Pointer(p))
|
||||
const (
|
||||
bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
|
||||
)
|
||||
|
||||
var (
|
||||
linuxSupportedFilesystems = []linuxFilesystemType{
|
||||
{
|
||||
name: "XFS",
|
||||
typeMagic: 0x58465342,
|
||||
maxQuota: 1<<(bitsPerWord-1) - 1,
|
||||
allowEmptyOutput: true, // XFS filesystems report nothing if a quota is not present
|
||||
}, {
|
||||
name: "ext4fs",
|
||||
typeMagic: 0xef53,
|
||||
maxQuota: (1<<(bitsPerWord-1) - 1) & (1<<58 - 1),
|
||||
allowEmptyOutput: false, // ext4 filesystems always report something even if a quota is not present
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// VolumeProvider supplies a quota applier to the generic code.
|
||||
type VolumeProvider struct {
|
||||
}
|
||||
|
||||
func openDir(path string) (*C.DIR, error) {
|
||||
Cpath := C.CString(path)
|
||||
defer free(Cpath)
|
||||
var quotaCmds = []string{"/sbin/xfs_quota",
|
||||
"/usr/sbin/xfs_quota",
|
||||
"/bin/xfs_quota"}
|
||||
|
||||
dir := C.opendir(Cpath)
|
||||
if dir == nil {
|
||||
return nil, fmt.Errorf("Can't open dir")
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
var quotaParseRegexp = regexp.MustCompilePOSIX("^[^ \t]*[ \t]*([0-9]+)")
|
||||
|
||||
func closeDir(dir *C.DIR) {
|
||||
if dir != nil {
|
||||
C.closedir(dir)
|
||||
}
|
||||
}
|
||||
var lsattrCmd = "/usr/bin/lsattr"
|
||||
var lsattrParseRegexp = regexp.MustCompilePOSIX("^ *([0-9]+) [^ ]+ (.*)$")
|
||||
|
||||
func getDirFd(dir *C.DIR) uintptr {
|
||||
return uintptr(C.dirfd(dir))
|
||||
}
|
||||
|
||||
// GetQuotaOnDir retrieves the quota ID (if any) associated with the specified directory
|
||||
func GetQuotaOnDir(path string) (QuotaID, error) {
|
||||
dir, err := openDir(path)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Can't open directory %s: %#+v", path, err)
|
||||
return BadQuotaID, err
|
||||
}
|
||||
defer closeDir(dir)
|
||||
var fsx C.struct_fsxattr
|
||||
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
|
||||
uintptr(unsafe.Pointer(&fsx)))
|
||||
if errno != 0 {
|
||||
return BadQuotaID, fmt.Errorf("Failed to get quota ID for %s: %v", path, errno.Error())
|
||||
}
|
||||
if fsx.fsx_projid == 0 {
|
||||
return BadQuotaID, fmt.Errorf("Failed to get quota ID for %s: %s", path, "no applicable quota")
|
||||
}
|
||||
return QuotaID(fsx.fsx_projid), nil
|
||||
}
|
||||
|
||||
// ApplyProjectToDir applies the specified quota ID to the specified directory
|
||||
func ApplyProjectToDir(path string, id QuotaID) error {
|
||||
dir, err := openDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closeDir(dir)
|
||||
|
||||
var fsx C.struct_fsxattr
|
||||
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
|
||||
uintptr(unsafe.Pointer(&fsx)))
|
||||
if errno != 0 {
|
||||
return fmt.Errorf("Failed to get quota ID for %s: %v", path, errno.Error())
|
||||
}
|
||||
fsx.fsx_projid = C.__u32(id)
|
||||
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
|
||||
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
|
||||
uintptr(unsafe.Pointer(&fsx)))
|
||||
if errno != 0 {
|
||||
return fmt.Errorf("Failed to set quota ID for %s: %v", path, errno.Error())
|
||||
// GetQuotaApplier -- does this backing device support quotas that
|
||||
// can be applied to directories?
|
||||
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier {
|
||||
for _, fsType := range linuxSupportedFilesystems {
|
||||
if isFilesystemOfType(mountpoint, backingDev, fsType.typeMagic) {
|
||||
return linuxVolumeQuotaApplier{mountpoint: mountpoint,
|
||||
maxQuota: fsType.maxQuota,
|
||||
allowEmptyOutput: fsType.allowEmptyOutput,
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type linuxVolumeQuotaApplier struct {
|
||||
mountpoint string
|
||||
maxQuota int64
|
||||
allowEmptyOutput bool
|
||||
}
|
||||
|
||||
func getXFSQuotaCmd() (string, error) {
|
||||
quotaCmdLock.Lock()
|
||||
defer quotaCmdLock.Unlock()
|
||||
if quotaCmdInitialized {
|
||||
return quotaCmd, nil
|
||||
}
|
||||
for _, program := range quotaCmds {
|
||||
fileinfo, err := os.Stat(program)
|
||||
if err == nil && ((fileinfo.Mode().Perm() & (1 << 6)) != 0) {
|
||||
klog.V(3).Infof("Found xfs_quota program %s", program)
|
||||
quotaCmd = program
|
||||
quotaCmdInitialized = true
|
||||
return quotaCmd, nil
|
||||
}
|
||||
}
|
||||
quotaCmdInitialized = true
|
||||
return "", fmt.Errorf("No xfs_quota program found")
|
||||
}
|
||||
|
||||
func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string, error) {
|
||||
quotaCmd, err := getXFSQuotaCmd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// We're using numeric project IDs directly; no need to scan
|
||||
// /etc/projects or /etc/projid
|
||||
klog.V(4).Infof("runXFSQuotaCommand %s -t %s -P/dev/null -D/dev/null -x -f %s -c %s", quotaCmd, mountsFile, mountpoint, command)
|
||||
cmd := exec.Command(quotaCmd, "-t", mountsFile, "-P/dev/null", "-D/dev/null", "-x", "-f", mountpoint, "-c", command)
|
||||
|
||||
data, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
klog.V(4).Infof("runXFSQuotaCommand output %q", string(data))
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
// Extract the mountpoint we care about into a temporary mounts file so that xfs_quota does
|
||||
// not attempt to scan every mount on the filesystem, which could hang if e. g.
|
||||
// a stuck NFS mount is present.
|
||||
// See https://bugzilla.redhat.com/show_bug.cgi?id=237120 for an example
|
||||
// of the problem that could be caused if this were to happen.
|
||||
func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
|
||||
tmpMounts, err := ioutil.TempFile("", "mounts")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Cannot create temporary mount file: %v", err)
|
||||
}
|
||||
tmpMountsFileName := tmpMounts.Name()
|
||||
defer tmpMounts.Close()
|
||||
defer os.Remove(tmpMountsFileName)
|
||||
|
||||
mounts, err := os.Open(MountsFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Cannot open mounts file %s: %v", MountsFile, err)
|
||||
}
|
||||
defer mounts.Close()
|
||||
|
||||
scanner := bufio.NewScanner(mounts)
|
||||
for scanner.Scan() {
|
||||
match := MountParseRegexp.FindStringSubmatch(scanner.Text())
|
||||
if match != nil {
|
||||
mount := match[2]
|
||||
if mount == mountpoint {
|
||||
if _, err := tmpMounts.WriteString(fmt.Sprintf("%s\n", scanner.Text())); err != nil {
|
||||
return "", fmt.Errorf("Cannot write temporary mounts file: %v", err)
|
||||
}
|
||||
if err := tmpMounts.Sync(); err != nil {
|
||||
return "", fmt.Errorf("Cannot sync temporary mounts file: %v", err)
|
||||
}
|
||||
return doRunXFSQuotaCommand(mountpoint, tmpMountsFileName, command)
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("Cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
|
||||
}
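Putting the pieces above together, the helper ends up invoking xfs_quota roughly as follows. The wrapper below is an illustrative condensation of doRunXFSQuotaCommand with hypothetical parameter names; the quoted -c command is just an example of the "limit -p" form used by SetQuotaOnDir.

package xfsquotaexample

import (
	"fmt"
	"os/exec"
)

// runXFSQuota mirrors the invocation built above: a trimmed mounts file via
// -t, empty projects/projid files, expert mode (-x), a single target
// mountpoint, and one -c command such as
// "limit -p bhard=1073741824 bsoft=1073741824 1048577" (values illustrative).
func runXFSQuota(xfsQuotaPath, tmpMountsFile, mountpoint, command string) (string, error) {
	cmd := exec.Command(xfsQuotaPath, "-t", tmpMountsFile, "-P/dev/null", "-D/dev/null",
		"-x", "-f", mountpoint, "-c", command)
	out, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("xfs_quota failed: %v", err)
	}
	return string(out), nil
}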
|
||||
|
||||
// SupportsQuotas determines whether the filesystem supports quotas.
|
||||
func SupportsQuotas(mountpoint string, qType QuotaType) (bool, error) {
|
||||
data, err := runXFSQuotaCommand(mountpoint, "state -p")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if qType == FSQuotaEnforcing {
|
||||
return strings.Contains(data, "Enforcement: ON"), nil
|
||||
}
|
||||
return strings.Contains(data, "Accounting: ON"), nil
|
||||
}
|
||||
|
||||
func isFilesystemOfType(mountpoint string, backingDev string, typeMagic int64) bool {
|
||||
var buf syscall.Statfs_t
|
||||
err := syscall.Statfs(mountpoint, &buf)
|
||||
if err != nil {
|
||||
klog.Warningf("Warning: Unable to statfs %s: %v", mountpoint, err)
|
||||
return false
|
||||
}
|
||||
if int64(buf.Type) != typeMagic {
|
||||
return false
|
||||
}
|
||||
if answer, _ := SupportsQuotas(mountpoint, FSQuotaAccounting); answer {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetQuotaOnDir retrieves the quota ID (if any) associated with the specified directory
|
||||
// If we can't make system calls, all we can say is that we don't know whether
|
||||
// it has a quota, and higher levels have to make the call.
|
||||
func (v linuxVolumeQuotaApplier) GetQuotaOnDir(path string) (QuotaID, error) {
|
||||
cmd := exec.Command(lsattrCmd, "-pd", path)
|
||||
data, err := cmd.Output()
|
||||
if err != nil {
|
||||
return BadQuotaID, fmt.Errorf("cannot run lsattr: %v", err)
|
||||
}
|
||||
match := lsattrParseRegexp.FindStringSubmatch(string(data))
|
||||
if match == nil {
|
||||
return BadQuotaID, fmt.Errorf("Unable to parse lsattr -pd %s output %s", path, string(data))
|
||||
}
|
||||
if match[2] != path {
|
||||
return BadQuotaID, fmt.Errorf("Mismatch between supplied and returned path (%s != %s)", path, match[2])
|
||||
}
|
||||
projid, err := strconv.ParseInt(match[1], 10, 32)
|
||||
if err != nil {
|
||||
return BadQuotaID, fmt.Errorf("Unable to parse project ID from %s (%v)", match[1], err)
|
||||
}
|
||||
return QuotaID(projid), nil
|
||||
}
|
||||
|
||||
// SetQuotaOnDir applies a quota to the specified directory under the specified mountpoint.
|
||||
func (v linuxVolumeQuotaApplier) SetQuotaOnDir(path string, id QuotaID, bytes int64) error {
|
||||
if bytes < 0 || bytes > v.maxQuota {
|
||||
bytes = v.maxQuota
|
||||
}
|
||||
_, err := runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("limit -p bhard=%v bsoft=%v %v", bytes, bytes, id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("project -s -p %s %v", path, id))
|
||||
return err
|
||||
}
|
||||
|
||||
func getQuantity(mountpoint string, id QuotaID, xfsQuotaArg string, multiplier int64, allowEmptyOutput bool) (int64, error) {
|
||||
data, err := runXFSQuotaCommand(mountpoint, fmt.Sprintf("quota -p -N -n -v %s %v", xfsQuotaArg, id))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Unable to run xfs_quota: %v", err)
|
||||
}
|
||||
if data == "" && allowEmptyOutput {
|
||||
return 0, nil
|
||||
}
|
||||
match := quotaParseRegexp.FindStringSubmatch(data)
|
||||
if match == nil {
|
||||
return 0, fmt.Errorf("Unable to parse quota output '%s'", data)
|
||||
}
|
||||
size, err := strconv.ParseInt(match[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Unable to parse data size '%s' from '%s': %v", match[1], data, err)
|
||||
}
|
||||
klog.V(4).Infof("getQuantity %s %d %s %d => %d %v", mountpoint, id, xfsQuotaArg, multiplier, size, err)
|
||||
return size * multiplier, nil
|
||||
}
|
||||
|
||||
// GetConsumption returns the consumption in bytes if available via quotas
|
||||
func (v linuxVolumeQuotaApplier) GetConsumption(_ string, id QuotaID) (int64, error) {
|
||||
return getQuantity(v.mountpoint, id, "-b", 1024, v.allowEmptyOutput)
|
||||
}
|
||||
|
||||
// GetInodes returns the inodes in use if available via quotas
|
||||
func (v linuxVolumeQuotaApplier) GetInodes(_ string, id QuotaID) (int64, error) {
|
||||
return getQuantity(v.mountpoint, id, "-i", 1, v.allowEmptyOutput)
|
||||
}
|
||||
|
||||
// QuotaIDIsInUse checks whether the specified quota ID is in use on the specified
|
||||
// filesystem
|
||||
func (v linuxVolumeQuotaApplier) QuotaIDIsInUse(id QuotaID) (bool, error) {
|
||||
bytes, err := v.GetConsumption(v.mountpoint, id)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if bytes > 0 {
|
||||
return true, nil
|
||||
}
|
||||
inodes, err := v.GetInodes(v.mountpoint, id)
|
||||
return inodes > 0, err
|
||||
}
|
||||
|
@ -1,31 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["quota_extfs.go"],
|
||||
cgo = True,
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/util/quota/extfs",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/volume/util/quota/common:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
@ -1,153 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package extfs
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include <dirent.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/quota.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef PRJQUOTA
|
||||
#define PRJQUOTA 2
|
||||
#endif
|
||||
#ifndef Q_SETPQUOTA
|
||||
#define Q_SETPQUOTA (unsigned) QCMD(Q_SETQUOTA, PRJQUOTA)
|
||||
#endif
|
||||
#ifndef Q_GETPQUOTA
|
||||
#define Q_GETPQUOTA (unsigned) QCMD(Q_GETQUOTA, PRJQUOTA)
|
||||
#endif
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/common"
|
||||
)
|
||||
|
||||
// ext4fs empirically has a maximum quota size of 2^48 - 1 1KiB blocks (256 petabytes)
|
||||
const (
|
||||
linuxExtfsMagic = 0xef53
|
||||
quotaBsize = 1024 // extfs specific
|
||||
bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
|
||||
maxQuota int64 = (1<<(bitsPerWord-1) - 1) & (1<<58 - 1) // either 1<<31 - 1 or 1<<58 - 1
|
||||
)
|
||||
|
||||
// VolumeProvider supplies a quota applier to the generic code.
|
||||
type VolumeProvider struct {
|
||||
}
|
||||
|
||||
// GetQuotaApplier -- does this backing device support quotas that
|
||||
// can be applied to directories?
|
||||
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) common.LinuxVolumeQuotaApplier {
|
||||
if common.IsFilesystemOfType(mountpoint, backingDev, linuxExtfsMagic) {
|
||||
return extfsVolumeQuota{backingDev}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type extfsVolumeQuota struct {
|
||||
backingDev string
|
||||
}
|
||||
|
||||
// GetQuotaOnDir -- get the quota ID that applies to this directory.
|
||||
|
||||
func (v extfsVolumeQuota) GetQuotaOnDir(path string) (common.QuotaID, error) {
|
||||
return common.GetQuotaOnDir(path)
|
||||
}
|
||||
|
||||
// SetQuotaOnDir -- apply the specified quota to the directory. If
|
||||
// bytes is not greater than zero, the quota should be applied in a
|
||||
// way that is non-enforcing (either explicitly so or by setting a
|
||||
// quota larger than anything the user may possibly create)
|
||||
func (v extfsVolumeQuota) SetQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
|
||||
klog.V(3).Infof("extfsSetQuotaOn %s ID %v bytes %v", path, id, bytes)
|
||||
if bytes < 0 || bytes > maxQuota {
|
||||
bytes = maxQuota
|
||||
}
|
||||
|
||||
var d C.struct_if_dqblk
|
||||
|
||||
d.dqb_bhardlimit = C.__u64(bytes / quotaBsize)
|
||||
d.dqb_bsoftlimit = d.dqb_bhardlimit
|
||||
d.dqb_ihardlimit = 0
|
||||
d.dqb_isoftlimit = 0
|
||||
d.dqb_valid = C.QIF_LIMITS
|
||||
|
||||
var cs = C.CString(v.backingDev)
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
|
||||
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_SETPQUOTA,
|
||||
uintptr(unsafe.Pointer(cs)), uintptr(id),
|
||||
uintptr(unsafe.Pointer(&d)), 0, 0)
|
||||
if errno != 0 {
|
||||
return fmt.Errorf("Failed to set quota limit for ID %d on %s: %v",
|
||||
id, path, errno.Error())
|
||||
}
|
||||
return common.ApplyProjectToDir(path, id)
|
||||
}
|
||||
|
||||
func (v extfsVolumeQuota) getQuotaInfo(path string, id common.QuotaID) (C.struct_if_dqblk, syscall.Errno) {
|
||||
var d C.struct_if_dqblk
|
||||
|
||||
var cs = C.CString(v.backingDev)
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
|
||||
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_GETPQUOTA,
|
||||
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(id)),
|
||||
uintptr(unsafe.Pointer(&d)), 0, 0)
|
||||
return d, errno
|
||||
}
|
||||
|
||||
// QuotaIDIsInUse -- determine whether the quota ID is already in use.
|
||||
func (v extfsVolumeQuota) QuotaIDIsInUse(path string, id common.QuotaID) (bool, error) {
|
||||
d, errno := v.getQuotaInfo(path, id)
|
||||
isInUse := !(d.dqb_bhardlimit == 0 && d.dqb_bsoftlimit == 0 && d.dqb_curspace == 0 &&
|
||||
d.dqb_ihardlimit == 0 && d.dqb_isoftlimit == 0 && d.dqb_curinodes == 0 &&
|
||||
d.dqb_btime == 0 && d.dqb_itime == 0)
|
||||
return errno == 0 && isInUse, nil
|
||||
}
|
||||
|
||||
// GetConsumption -- retrieve the consumption (in bytes) of the directory
|
||||
// Note that with ext[[:digit:]]fs the quota consumption is in bytes
|
||||
// per man quotactl
|
||||
func (v extfsVolumeQuota) GetConsumption(path string, id common.QuotaID) (int64, error) {
|
||||
d, errno := v.getQuotaInfo(path, id)
|
||||
if errno != 0 {
|
||||
return 0, fmt.Errorf("Failed to get quota for %s: %s", path, errno.Error())
|
||||
}
|
||||
klog.V(3).Infof("Consumption for %s is %v", path, d.dqb_curspace)
|
||||
return int64(d.dqb_curspace), nil
|
||||
}
|
||||
|
||||
// GetInodes -- retrieve the number of inodes in use under the directory
|
||||
func (v extfsVolumeQuota) GetInodes(path string, id common.QuotaID) (int64, error) {
|
||||
d, errno := v.getQuotaInfo(path, id)
|
||||
if errno != 0 {
|
||||
return 0, fmt.Errorf("Failed to get quota for %s: %s", path, errno.Error())
|
||||
}
|
||||
klog.V(3).Infof("Inode consumption for %s is %v", path, d.dqb_curinodes)
|
||||
return int64(d.dqb_curinodes), nil
|
||||
}
|
@ -29,15 +29,14 @@ import (
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/common"
|
||||
)
|
||||
|
||||
var projectsFile = "/etc/projects"
|
||||
var projidFile = "/etc/projid"
|
||||
|
||||
var projectsParseRegexp *regexp.Regexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
|
||||
var projidParseRegexp *regexp.Regexp = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")
|
||||
var projectsParseRegexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
|
||||
var projidParseRegexp = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")
|
||||
|
||||
var quotaIDLock sync.RWMutex
|
||||
|
||||
@ -78,12 +77,13 @@ func unlockFile(file *os.File) error {
|
||||
func openAndLockProjectFiles() (*os.File, *os.File, error) {
|
||||
// Make sure neither project-related file is a symlink!
|
||||
if err := projFilesAreOK(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, fmt.Errorf("system project files failed verification: %v", err)
|
||||
}
|
||||
// We don't actually modify the original files; we create temporaries and
|
||||
// move them over the originals
|
||||
fProjects, err := os.OpenFile(projectsFile, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("unable to open %s: %v", projectsFile, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
fProjid, err := os.OpenFile(projidFile, os.O_RDONLY|os.O_CREATE, 0644)
|
||||
@ -97,10 +97,17 @@ func openAndLockProjectFiles() (*os.File, *os.File, error) {
|
||||
return fProjects, fProjid, nil
|
||||
}
|
||||
// Nothing useful we can do if we get an error here
|
||||
err = fmt.Errorf("unable to lock %s: %v", projidFile, err)
|
||||
unlockFile(fProjects)
|
||||
} else {
|
||||
err = fmt.Errorf("unable to lock %s: %v", projectsFile, err)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("system project files failed re-verification: %v", err)
|
||||
}
|
||||
fProjid.Close()
|
||||
} else {
|
||||
err = fmt.Errorf("unable to open %s: %v", projidFile, err)
|
||||
}
|
||||
fProjects.Close()
|
||||
return nil, nil, err
|
||||
@ -160,7 +167,7 @@ func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.Quot
|
||||
unusedQuotasSearched := 0
|
||||
for id := common.FirstQuota; id == id; id++ {
|
||||
if _, ok := idMap[id]; !ok {
|
||||
isInUse, err := getApplier(path).QuotaIDIsInUse(path, id)
|
||||
isInUse, err := getApplier(path).QuotaIDIsInUse(id)
|
||||
if err != nil {
|
||||
return common.BadQuotaID, err
|
||||
} else if !isInUse {
|
||||
@ -307,8 +314,7 @@ func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, l
|
||||
}
|
||||
os.Remove(tmpProjects)
|
||||
}
|
||||
klog.V(3).Infof("Unable to write project files: %v", err)
|
||||
return err
|
||||
return fmt.Errorf("Unable to write project files: %v", err)
|
||||
}
|
||||
|
||||
func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
|
||||
@ -326,8 +332,7 @@ func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("addQuotaID %s %v failed %v", path, ID, err)
|
||||
return common.BadQuotaID, err
|
||||
return common.BadQuotaID, fmt.Errorf("createProjectID %s %v failed %v", path, ID, err)
|
||||
}
|
||||
|
||||
func removeProjectID(path string, ID common.QuotaID) error {
|
||||
@ -348,6 +353,5 @@ func removeProjectID(path string, ID common.QuotaID) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("removeQuotaID %s %v failed %v", path, ID, err)
|
||||
return err
|
||||
return fmt.Errorf("removeProjectID %s %v failed %v", path, ID, err)
|
||||
}
|
||||
|
@@ -17,6 +17,7 @@ limitations under the License.
package quota

import (
	"k8s.io/apimachinery/pkg/api/resource"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
@@ -28,13 +29,13 @@ type Interface interface {
	SupportsQuotas(m mount.Interface, path string) (bool, error)
	// Assign a quota (picked by the quota mechanism) to a path,
	// and return it.
	AssignQuota(m mount.Interface, path string, poduid string, bytes int64) error
	AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error

	// Get the quota-based storage consumption for the path
	GetConsumption(path string) (int64, error)
	GetConsumption(path string) (*resource.Quantity, error)

	// Get the quota-based inode consumption for the path
	GetInodes(path string) (int64, error)
	GetInodes(path string) (*resource.Quantity, error)

	// Remove the quota from a path
	// Implementations may assume that any data covered by the
@@ -43,5 +44,5 @@ type Interface interface {
}

func enabledQuotasForMonitoring() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.FSQuotaForLSCIMonitoring)
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}
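With the signature changes above, a nil *resource.Quantity with a nil error now means "path is not under quota management" rather than a failure. A sketch of how a caller is expected to consume GetConsumption under that convention (the function name is illustrative):

package quotaifaceexample

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// reportUsage treats a nil quantity as "no quota on this path" and a non-nil
// error as a genuine lookup failure, matching the Interface comments above.
func reportUsage(path string) error {
	bytesUsed, err := quota.GetConsumption(path)
	if err != nil {
		return fmt.Errorf("quota consumption lookup failed for %s: %v", path, err)
	}
	if bytesUsed == nil {
		fmt.Printf("%s: not under quota management; fall back to du/find\n", path)
		return nil
	}
	fmt.Printf("%s: %s used according to quota accounting\n", path, bytesUsed.String())
	return nil
}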
@ -23,15 +23,13 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/common"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/extfs"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/xfs"
|
||||
)
|
||||
|
||||
// Pod -> ID
|
||||
@ -64,8 +62,6 @@ var quotaLock sync.RWMutex
|
||||
var supportsQuotasMap = make(map[string]bool)
|
||||
var supportsQuotasLock sync.RWMutex
|
||||
|
||||
var mountParseRegexp *regexp.Regexp = regexp.MustCompilePOSIX("^([^ ]*)[ \t]*([^ ]*)[ \t]*([^ ]*)") // Ignore options etc.
|
||||
|
||||
// Directory -> backingDev
|
||||
var backingDevMap = make(map[string]string)
|
||||
var backingDevLock sync.RWMutex
|
||||
@ -73,11 +69,8 @@ var backingDevLock sync.RWMutex
|
||||
var mountpointMap = make(map[string]string)
|
||||
var mountpointLock sync.RWMutex
|
||||
|
||||
var mountsFile = "/proc/self/mounts"
|
||||
|
||||
var providers = []common.LinuxVolumeQuotaProvider{
|
||||
&extfs.VolumeProvider{},
|
||||
&xfs.VolumeProvider{},
|
||||
&common.VolumeProvider{},
|
||||
}
|
||||
|
||||
// Separate the innards for ease of testing
|
||||
@ -89,7 +82,7 @@ func detectBackingDevInternal(mountpoint string, mounts string) (string, error)
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
match := mountParseRegexp.FindStringSubmatch(scanner.Text())
|
||||
match := common.MountParseRegexp.FindStringSubmatch(scanner.Text())
|
||||
if match != nil {
|
||||
device := match[1]
|
||||
mount := match[2]
|
||||
@ -103,7 +96,7 @@ func detectBackingDevInternal(mountpoint string, mounts string) (string, error)
|
||||
|
||||
// detectBackingDev assumes that the mount point provided is valid
|
||||
func detectBackingDev(_ mount.Interface, mountpoint string) (string, error) {
|
||||
return detectBackingDevInternal(mountpoint, mountsFile)
|
||||
return detectBackingDevInternal(mountpoint, common.MountsFile)
|
||||
}
|
||||
|
||||
func clearBackingDev(path string) {
|
||||
@ -140,12 +133,15 @@ func detectMountpointInternal(m mount.Interface, path string) (string, error) {
|
||||
|
||||
func detectMountpoint(m mount.Interface, path string) (string, error) {
|
||||
xpath, err := filepath.Abs(path)
|
||||
if err == nil {
|
||||
if xpath, err = filepath.EvalSymlinks(xpath); err == nil {
|
||||
if xpath, err = detectMountpointInternal(m, xpath); err == nil {
|
||||
return xpath, nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "/", err
|
||||
}
|
||||
xpath, err = filepath.EvalSymlinks(xpath)
|
||||
if err != nil {
|
||||
return "/", err
|
||||
}
|
||||
if xpath, err = detectMountpointInternal(m, xpath); err == nil {
|
||||
return xpath, nil
|
||||
}
|
||||
return "/", err
|
||||
}
|
||||
@ -171,18 +167,16 @@ func getFSInfo(m mount.Interface, path string) (string, string, error) {
|
||||
mountpoint, okMountpoint := mountpointMap[path]
|
||||
if !okMountpoint {
|
||||
mountpoint, err = detectMountpoint(m, path)
|
||||
klog.V(3).Infof("Mountpoint %s -> %s (%v)", path, mountpoint, err)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", fmt.Errorf("Cannot determine mountpoint for %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
backingDev, okBackingDev := backingDevMap[path]
|
||||
if !okBackingDev {
|
||||
backingDev, err = detectBackingDev(m, mountpoint)
|
||||
klog.V(3).Infof("Backing dev %s -> %s (%v)", path, backingDev, err)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", fmt.Errorf("Cannot determine backing device for %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
mountpointMap[path] = mountpoint
|
||||
@ -228,20 +222,21 @@ func getQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
|
||||
func clearQuotaOnDir(m mount.Interface, path string) error {
|
||||
// Since we may be called without path being in the map,
|
||||
// we explicitly have to check in this case.
|
||||
klog.V(3).Infof("clearQuotaOnDir %s", path)
|
||||
klog.V(4).Infof("clearQuotaOnDir %s", path)
|
||||
supportsQuotas, err := SupportsQuotas(m, path)
|
||||
if !supportsQuotas {
|
||||
return nil
|
||||
}
|
||||
projid, err := getQuotaOnDir(m, path)
|
||||
if err == nil && projid != common.BadQuotaID {
|
||||
klog.V(3).Infof("clearQuotaOnDir clearing quota")
|
||||
// This means that we have a quota on the directory but
|
||||
// we can't clear it. That's not good.
|
||||
err = setQuotaOnDir(path, projid, 0)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Attempt to clear quota failed: %v", err)
|
||||
}
|
||||
// Even if clearing the quota failed, we still need to
|
||||
// try to remove the project ID, or that may be left dangling.
|
||||
err1 := removeProjectID(path, projid)
|
||||
if err1 != nil {
|
||||
klog.V(3).Infof("Attempt to remove quota ID from system files failed: %v", err1)
|
||||
@ -252,9 +247,9 @@ func clearQuotaOnDir(m mount.Interface, path string) error {
|
||||
}
|
||||
return err1
|
||||
}
|
||||
klog.V(3).Infof("clearQuotaOnDir fails %v", err)
|
||||
// If we couldn't get a quota, that's fine -- there may
|
||||
// never have been one, and we have no way to know otherwise
|
||||
klog.V(3).Infof("clearQuotaOnDir fails %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -274,7 +269,6 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
|
||||
return supportsQuotas, nil
|
||||
}
|
||||
mount, dev, err := getFSInfo(m, path)
|
||||
klog.V(3).Infof("SupportsQuotas %s -> mount %s dev %s %v", path, mount, dev, err)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -289,12 +283,10 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
|
||||
}
|
||||
}
|
||||
if applier != nil {
|
||||
klog.V(3).Infof("SupportsQuotas got applier %v", applier)
|
||||
supportsQuotasMap[path] = true
|
||||
setApplier(path, applier)
|
||||
return true, nil
|
||||
}
|
||||
klog.V(3).Infof("SupportsQuotas got no applier")
|
||||
delete(backingDevMap, path)
|
||||
delete(mountpointMap, path)
|
||||
return false, nil
|
||||
@ -304,7 +296,11 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
|
||||
// AssignQuota chooses the quota ID based on the pod UID and path.
|
||||
// If the pod UID is identical to another one known, it may (but presently
|
||||
// doesn't) choose the same quota ID as other volumes in the pod.
|
||||
func AssignQuota(m mount.Interface, path string, poduid string, bytes int64) error {
|
||||
func AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error {
|
||||
if bytes == nil {
|
||||
return fmt.Errorf("Attempting to assign null quota to %s", path)
|
||||
}
|
||||
ibytes := bytes.Value()
|
||||
if ok, err := SupportsQuotas(m, path); !ok {
|
||||
return fmt.Errorf("Quotas not supported on %s: %v", path, err)
|
||||
}
|
||||
@ -316,13 +312,12 @@ func AssignQuota(m mount.Interface, path string, poduid string, bytes int64) err
|
||||
// If and when we decide permanently that we're going to adop
|
||||
// one quota per volume, we can rip all of the pod code out.
|
||||
poduid = string(uuid.NewUUID())
|
||||
klog.V(3).Infof("Synthesizing pod ID %s for directory %s in AssignQuota", poduid, path)
|
||||
if pod, ok := dirPodMap[path]; ok && pod != poduid {
|
||||
return fmt.Errorf("Requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
|
||||
}
|
||||
oid, ok := podQuotaMap[poduid]
|
||||
if ok {
|
||||
if quotaSizeMap[oid] != bytes {
|
||||
if quotaSizeMap[oid] != ibytes {
|
||||
return fmt.Errorf("Requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
|
||||
}
|
||||
} else {
|
||||
@ -331,60 +326,68 @@ func AssignQuota(m mount.Interface, path string, poduid string, bytes int64) err
|
||||
id, err := createProjectID(path, oid)
|
||||
if err == nil {
|
||||
if oid != common.BadQuotaID && oid != id {
|
||||
klog.V(3).Infof("Attempt to reassign quota %v to %v", oid, id)
|
||||
return fmt.Errorf("Attempt to reassign quota %v to %v", oid, id)
|
||||
}
|
||||
// When enforcing quotas are enabled, we'll condition this
|
||||
// on their being disabled also.
|
||||
if bytes > 0 {
|
||||
bytes = -1
|
||||
if ibytes > 0 {
|
||||
ibytes = -1
|
||||
}
|
||||
if err = setQuotaOnDir(path, id, bytes); err == nil {
|
||||
if err = setQuotaOnDir(path, id, ibytes); err == nil {
|
||||
quotaPodMap[id] = poduid
|
||||
quotaSizeMap[id] = bytes
|
||||
quotaSizeMap[id] = ibytes
|
||||
podQuotaMap[poduid] = id
|
||||
dirQuotaMap[path] = id
|
||||
dirPodMap[path] = poduid
|
||||
podDirCountMap[poduid]++
|
||||
klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
|
||||
return nil
|
||||
}
|
||||
removeProjectID(path, id)
|
||||
}
|
||||
klog.V(3).Infof("Assign quota FAILED %v", err)
|
||||
return err
|
||||
return fmt.Errorf("Assign quota FAILED %v", err)
|
||||
}
|
||||
|
||||
// GetConsumption -- retrieve the consumption (in bytes) of the directory
|
||||
func GetConsumption(path string) (int64, error) {
|
||||
func GetConsumption(path string) (*resource.Quantity, error) {
|
||||
// Note that we actually need to hold the lock at least through
|
||||
// running the quota command, so it can't get recycled behind our back
|
||||
quotaLock.Lock()
|
||||
defer quotaLock.Unlock()
|
||||
applier := getApplier(path)
|
||||
// No applier means directory is not under quota management
|
||||
if applier == nil {
|
||||
return 0, fmt.Errorf("No quota available for %s", path)
|
||||
return nil, nil
|
||||
}
|
||||
return applier.GetConsumption(path, dirQuotaMap[path])
|
||||
ibytes, err := applier.GetConsumption(path, dirQuotaMap[path])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resource.NewQuantity(ibytes, resource.DecimalSI), nil
|
||||
}
|
||||
|
||||
// GetInodes -- retrieve the number of inodes in use under the directory
|
||||
func GetInodes(path string) (int64, error) {
|
||||
func GetInodes(path string) (*resource.Quantity, error) {
|
||||
// Note that we actually need to hold the lock at least through
|
||||
// running the quota command, so it can't get recycled behind our back
|
||||
quotaLock.Lock()
|
||||
defer quotaLock.Unlock()
|
||||
applier := getApplier(path)
|
||||
// No applier means directory is not under quota management
|
||||
if applier == nil {
|
||||
return 0, fmt.Errorf("No quota available for %s", path)
|
||||
return nil, nil
|
||||
}
|
||||
return applier.GetInodes(path, dirQuotaMap[path])
|
||||
inodes, err := applier.GetInodes(path, dirQuotaMap[path])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resource.NewQuantity(inodes, resource.DecimalSI), nil
|
||||
}
|
||||
|
||||
// ClearQuota -- remove the quota assigned to a directory
|
||||
func ClearQuota(m mount.Interface, path string) error {
|
||||
klog.V(3).Infof("ClearQuota %s", path)
|
||||
if !enabledQuotasForMonitoring() {
|
||||
klog.V(3).Info("ClearQuota called, but quotas disabled")
|
||||
return fmt.Errorf("ClearQuota called, but quotas disabled")
|
||||
}
|
||||
quotaLock.Lock()
|
||||
@ -412,6 +415,8 @@ func ClearQuota(m mount.Interface, path string) error {
|
||||
count, ok := podDirCountMap[poduid]
|
||||
if count <= 1 || !ok {
|
||||
err = clearQuotaOnDir(m, path)
|
||||
// This error should be noted; we still need to clean up
|
||||
// and otherwise handle in the same way.
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Unable to clear quota %v %s: %v", dirQuotaMap[path], path, err)
|
||||
}
|
||||
@ -422,11 +427,14 @@ func ClearQuota(m mount.Interface, path string) error {
|
||||
} else {
|
||||
err = removeProjectID(path, projid)
|
||||
podDirCountMap[poduid]--
|
||||
klog.V(3).Infof("Not clearing quota for pod %s; still %v dirs outstanding", poduid, podDirCountMap[poduid])
|
||||
klog.V(4).Infof("Not clearing quota for pod %s; still %v dirs outstanding", poduid, podDirCountMap[poduid])
|
||||
}
|
||||
delete(dirPodMap, path)
|
||||
delete(dirQuotaMap, path)
|
||||
delete(supportsQuotasMap, path)
|
||||
clearApplier(path)
|
||||
return err
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to clear quota for %s: %v", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -21,7 +21,9 @@ package quota
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/common"
|
||||
@ -74,13 +76,13 @@ func dummyFakeMount1() mount.Interface {
|
||||
Opts: []string{"rw", "relatime"},
|
||||
},
|
||||
{
|
||||
Device: "dev/mapper/fedora-root",
|
||||
Device: "/dev/mapper/fedora-root",
|
||||
Path: "/",
|
||||
Type: "ext4",
|
||||
Opts: []string{"rw", "relatime"},
|
||||
},
|
||||
{
|
||||
Device: "dev/mapper/fedora-home",
|
||||
Device: "/dev/mapper/fedora-home",
|
||||
Path: "/home",
|
||||
Type: "ext4",
|
||||
Opts: []string{"rw", "relatime"},
|
||||
@ -363,7 +365,7 @@ func (v testVolumeQuota) GetQuotaOnDir(path string) (common.QuotaID, error) {
|
||||
return common.BadQuotaID, fmt.Errorf("No quota available for %s", path)
|
||||
}
|
||||
|
||||
func (v testVolumeQuota) QuotaIDIsInUse(_ string, id common.QuotaID) (bool, error) {
|
||||
func (v testVolumeQuota) QuotaIDIsInUse(id common.QuotaID) (bool, error) {
|
||||
if _, ok := testIDQuotaMap[id]; ok {
|
||||
return true, nil
|
||||
}
|
||||
@ -389,7 +391,7 @@ func fakeSupportsQuotas(path string) (bool, error) {
|
||||
|
||||
func fakeAssignQuota(path string, poduid string, bytes int64) error {
|
||||
dummySetFSInfo(path)
|
||||
return AssignQuota(dummyQuotaTest(), path, poduid, bytes)
|
||||
return AssignQuota(dummyQuotaTest(), path, poduid, resource.NewQuantity(bytes, resource.DecimalSI))
|
||||
}
|
||||
|
||||
func fakeClearQuota(path string) error {
|
||||
@ -529,14 +531,6 @@ func compareProjectsFiles(t *testing.T, testcase quotaTestCase, projectsFile str
|
||||
}
|
||||
}
|
||||
|
||||
func setFeature(feature utilfeature.Feature, value bool) error {
|
||||
v := "true"
|
||||
if !value {
|
||||
v = "false"
|
||||
}
|
||||
return utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", string(feature), v))
|
||||
}
|
||||
|
||||
func runCaseEnabled(t *testing.T, testcase quotaTestCase, seq int) bool {
|
||||
fail := false
|
||||
var err error
|
||||
@ -604,9 +598,7 @@ func runCaseDisabled(t *testing.T, testcase quotaTestCase, seq int) bool {
|
||||
}
|
||||
|
||||
func testAddRemoveQuotas(t *testing.T, enabled bool) {
|
||||
if err := setFeature(features.FSQuotaForLSCIMonitoring, enabled); err != nil {
|
||||
t.Errorf("Unable to enable LSCI monitoring: %v", err)
|
||||
}
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolationFSQuotaMonitoring, enabled)()
|
||||
tmpProjectsFile, err := ioutil.TempFile("", "projects")
|
||||
if err == nil {
|
||||
_, err = tmpProjectsFile.WriteString(projectsHeader)
|
||||
|
@@ -20,6 +20,7 @@ package quota

import (
	"errors"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/mount"
)

@@ -34,18 +35,18 @@ func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
}

// AssignQuota -- dummy implementation
func AssignQuota(_ mount.Interface, _ string, _ string, _ int64) error {
func AssignQuota(_ mount.Interface, _ string, _ string, _ *resource.Quantity) error {
	return errNotImplemented
}

// GetConsumption -- dummy implementation
func GetConsumption(_ string) (int64, error) {
	return 0, errNotImplemented
func GetConsumption(_ string) (*resource.Quantity, error) {
	return nil, errNotImplemented
}

// GetInodes -- dummy implementation
func GetInodes(_ string) (int64, error) {
	return 0, errNotImplemented
func GetInodes(_ string) (*resource.Quantity, error) {
	return nil, errNotImplemented
}

// ClearQuota -- dummy implementation
@ -1,31 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["quota_xfs.go"],
cgo = True,
importpath = "k8s.io/kubernetes/pkg/volume/util/quota/xfs",
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/volume/util/quota/common:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"//conditions:default": [],
}),
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@ -1,153 +0,0 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package xfs

/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <errno.h>

#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef XFS_PROJ_QUOTA
#define XFS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"

import (
"fmt"
"syscall"
"unsafe"

"golang.org/x/sys/unix"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

const (
linuxXfsMagic = 0x58465342
// Documented in man xfs_quota(8); not necessarily the same
// as the filesystem blocksize
quotaBsize = 512
bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
maxQuota int64 = 1<<(bitsPerWord-1) - 1 // either 1<<31 - 1 or 1<<63 - 1
)

// VolumeProvider supplies a quota applier to the generic code.
type VolumeProvider struct {
}

// GetQuotaApplier -- does this backing device support quotas that
// can be applied to directories?
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) common.LinuxVolumeQuotaApplier {
if common.IsFilesystemOfType(mountpoint, backingDev, linuxXfsMagic) {
return xfsVolumeQuota{backingDev}
}
return nil
}

type xfsVolumeQuota struct {
backingDev string
}

// GetQuotaOnDir -- get the quota ID that applies to this directory.
func (v xfsVolumeQuota) GetQuotaOnDir(path string) (common.QuotaID, error) {
return common.GetQuotaOnDir(path)
}

// SetQuotaOnDir -- apply the specified quota to the directory. If
// bytes is not greater than zero, the quota should be applied in a
// way that is non-enforcing (either explicitly so or by setting a
// quota larger than anything the user may possibly create)
func (v xfsVolumeQuota) SetQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
klog.V(3).Infof("xfsSetQuotaOn %s ID %v bytes %v", path, id, bytes)
if bytes < 0 || bytes > maxQuota {
bytes = maxQuota
}

var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(id)
d.d_flags = C.XFS_PROJ_QUOTA

d.d_fieldmask = C.FS_DQ_BHARD
d.d_blk_hardlimit = C.__u64(bytes / quotaBsize)
d.d_blk_softlimit = d.d_blk_hardlimit

var cs = C.CString(v.backingDev)
defer C.free(unsafe.Pointer(cs))

_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return fmt.Errorf("Failed to set quota limit for ID %d on %s: %v",
id, path, errno.Error())
}
return common.ApplyProjectToDir(path, id)
}

func (v xfsVolumeQuota) getQuotaInfo(path string, id common.QuotaID) (C.fs_disk_quota_t, syscall.Errno) {
var d C.fs_disk_quota_t

var cs = C.CString(v.backingDev)
defer C.free(unsafe.Pointer(cs))

_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(id)),
uintptr(unsafe.Pointer(&d)), 0, 0)
return d, errno
}

// QuotaIDIsInUse -- determine whether the quota ID is already in use.
func (v xfsVolumeQuota) QuotaIDIsInUse(path string, id common.QuotaID) (bool, error) {
_, errno := v.getQuotaInfo(path, id)
return errno == 0, nil
}

// GetConsumption -- retrieve the consumption (in bytes) of the directory
func (v xfsVolumeQuota) GetConsumption(path string, id common.QuotaID) (int64, error) {
d, errno := v.getQuotaInfo(path, id)
if errno != 0 {
return 0, fmt.Errorf("Failed to get quota for %s: %s", path, errno.Error())
}
klog.V(3).Infof("Consumption for %s is %v", path, d.d_bcount*quotaBsize)
return int64(d.d_bcount) * quotaBsize, nil
}

// GetInodes -- retrieve the number of inodes in use under the directory
func (v xfsVolumeQuota) GetInodes(path string, id common.QuotaID) (int64, error) {
d, errno := v.getQuotaInfo(path, id)
if errno != 0 {
return 0, fmt.Errorf("Failed to get quota for %s: %s", path, errno.Error())
}
klog.V(3).Infof("Inode consumption for %s is %v", path, d.d_icount)
return int64(d.d_icount), nil
}
@ -540,3 +540,11 @@ func GetPluginMountDir(host volume.VolumeHost, name string) string {
mntDir := filepath.Join(host.GetPluginDir(name), MountsInGlobalPDPath)
return mntDir
}

// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume vs. some other type
func IsLocalEphemeralVolume(volume v1.Volume) bool {
return volume.GitRepo != nil ||
(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
volume.ConfigMap != nil || volume.DownwardAPI != nil
}
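The helper above classifies a volume as locally ephemeral from its source alone, so callers no longer need a private copy of the check. A small usage sketch, with the import alias volumeutil chosen for illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// A disk-backed emptyDir counts as local ephemeral storage...
	disk := v1.Volume{
		Name:         "scratch",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
	}
	// ...while a memory-backed emptyDir (tmpfs) does not.
	mem := v1.Volume{
		Name: "cache",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
		},
	}
	fmt.Println(volumeutil.IsLocalEphemeralVolume(disk)) // true
	fmt.Println(volumeutil.IsLocalEphemeralVolume(mem))  // false
}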
@ -104,7 +104,7 @@ type Attributes struct {
// MounterArgs provides more easily extensible arguments to Mounter
type MounterArgs struct {
FsGroup *int64
DesiredSize int64
DesiredSize *resource.Quantity
PodUID string
}
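DesiredSize switches from a raw int64 to a *resource.Quantity, so callers can pass a size limit through without first flattening it to bytes. A rough sketch of constructing the arguments; the struct is redeclared locally only to keep the example self-contained, and the limit and pod UID values are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// MounterArgs mirrors the struct shown in the diff above.
type MounterArgs struct {
	FsGroup     *int64
	DesiredSize *resource.Quantity
	PodUID      string
}

func main() {
	fsGroup := int64(1000)
	limit := resource.MustParse("2Gi") // e.g. a pod's ephemeral-storage limit

	args := MounterArgs{
		FsGroup:     &fsGroup,
		DesiredSize: &limit,
		PodUID:      "0f36b4a6-example-pod-uid",
	}
	fmt.Printf("mounting with desired size %s for pod %s\n",
		args.DesiredSize.String(), args.PodUID)
}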