Add storage isolation API

This PR adds new APIs to support storage capacity isolation, as described in the proposal:
https://github.com/kubernetes/community/pull/306

1. Add SizeLimit for the emptyDir volume source.
2. Add scratch and overlay storage types, used at the container and node
   level (see the usage sketch below).
Author: Jing Xu, 2017-05-25 13:53:40 -07:00
parent 052cd6d30b
commit 85f030c2aa
5 changed files with 100 additions and 1 deletion
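
For context, a minimal sketch of what the new fields enable from an API consumer's side, using the versioned types added below. The pod itself is illustrative and not part of this commit; import paths assume the v1.7-era source tree.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{{
				Name: "cache",
				VolumeSource: v1.VolumeSource{
					EmptyDir: &v1.EmptyDirVolumeSource{
						// New field in this PR: cap this emptyDir at 1Gi.
						SizeLimit: resource.MustParse("1Gi"),
					},
				},
			}},
			Containers: []v1.Container{{
				Name:  "app",
				Image: "busybox",
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						// New resource name in this PR: limit the
						// container's writable overlay filesystem usage.
						v1.ResourceStorageOverlay: resource.MustParse("2Gi"),
					},
				},
			}},
		},
	}
	fmt.Println(pod.Spec.Volumes[0].EmptyDir.SizeLimit.String()) // 1Gi
}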


@@ -601,6 +601,14 @@ type EmptyDirVolumeSource struct {
// The default is "" which means to use the node's default medium.
// +optional
Medium StorageMedium
// Total amount of local storage required for this EmptyDir volume.
// The size limit is also applicable for memory medium.
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// +optional
SizeLimit resource.Quantity
}
// StorageMedium defines ways that storage can be allocated to a volume.
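
The "minimum of SizeLimit and the pod's memory limits" rule in the comment above can be stated as a small helper. This is an illustrative sketch against the internal types; the function itself is hypothetical, not part of the commit.

// Hypothetical helper: effective cap for a memory-medium emptyDir, per the
// comment above: min(SizeLimit, sum of container memory limits), where an
// unset (zero) SizeLimit means only the memory limits apply.
func effectiveMemoryMediumLimit(sizeLimit resource.Quantity, pod *api.Pod) resource.Quantity {
	memSum := resource.Quantity{Format: resource.BinarySI}
	for _, c := range pod.Spec.Containers {
		if mem, ok := c.Resources.Limits[api.ResourceMemory]; ok {
			memSum.Add(mem)
		}
	}
	if sizeLimit.IsZero() || memSum.Cmp(sizeLimit) < 0 {
		return memSum
	}
	return sizeLimit
}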
@@ -3017,6 +3025,12 @@ const (
ResourceMemory ResourceName = "memory"
// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// Local Storage for overlay filesystem, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceStorageOverlay is alpha and it can change across releases.
ResourceStorageOverlay ResourceName = "storage.kubernetes.io/overlay"
// Local Storage for scratch space, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceStorageScratch is alpha and it can change across releases.
ResourceStorageScratch ResourceName = "storage.kubernetes.io/scratch"
// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
// Number of Pods that may be running on this Node: see ResourcePods
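
These names slot into any api.ResourceList. As an illustration only (values and function name hypothetical, not from this commit), a node advertising scratch capacity might look like:

// Illustrative: the new resource names appear alongside cpu and memory
// in an ordinary ResourceList.
func exampleNodeCapacity() api.ResourceList {
	return api.ResourceList{
		api.ResourceCPU:            resource.MustParse("8"),
		api.ResourceMemory:         resource.MustParse("32Gi"),
		api.ResourceStorageScratch: resource.MustParse("500Gi"),
	}
}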


@@ -686,6 +686,14 @@ type EmptyDirVolumeSource struct {
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
// Total amount of local storage required for this EmptyDir volume.
// The size limit is also applicable for memory medium.
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// +optional
SizeLimit resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
@@ -3455,6 +3463,12 @@ const (
ResourceMemory ResourceName = "memory"
// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// Local Storage for container overlay filesystem, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceStorageOverlay is alpha and it can change across releases.
ResourceStorageOverlay ResourceName = "storage.kubernetes.io/overlay"
// Local Storage for scratch space, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceStorageScratch is alpha and it can change across releases.
ResourceStorageScratch ResourceName = "storage.kubernetes.io/scratch"
// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
// Number of Pods that may be running on this Node: see ResourcePods
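
Since the versioned SizeLimit field above carries a json:"sizeLimit" tag, it round-trips on the wire as a quantity string. A small standalone sketch, not part of the commit:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	src := v1.EmptyDirVolumeSource{SizeLimit: resource.MustParse("512Mi")}
	b, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"sizeLimit":"512Mi"}
}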


@@ -396,7 +396,12 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E
allErrs := field.ErrorList{}
if source.EmptyDir != nil {
numVolumes++
// EmptyDirs have nothing to validate
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
unsetSizeLimit := resource.Quantity{}
if unsetSizeLimit.Cmp(source.EmptyDir.SizeLimit) != 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes"))
}
}
}
if source.HostPath != nil {
if numVolumes > 0 {
@@ -3633,6 +3638,9 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat
allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
}
}
if resourceName == api.ResourceStorageOverlay && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
allErrs = append(allErrs, field.Forbidden(limPath, "ResourceStorageOverlay field disabled by feature-gate for ResourceRequirements"))
}
}
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
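
The unset check in validateVolumeSource above compares against a zero-value Quantity because the internal SizeLimit field is a value, not a pointer: "unset" is indistinguishable from the zero Quantity. The idiom in isolation, with a hypothetical function name:

// Hypothetical helper showing the idiom used above: Cmp against a fresh
// zero Quantity detects whether the user set the field at all.
func sizeLimitIsSet(q resource.Quantity) bool {
	unset := resource.Quantity{}
	return unset.Cmp(q) != 0
}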


@@ -2281,6 +2281,62 @@ func TestValidateVolumes(t *testing.T) {
}
}
func TestAlphaLocalStorageCapacityIsolation(t *testing.T) {
testCases := []api.VolumeSource{
{EmptyDir: &api.EmptyDirVolumeSource{SizeLimit: *resource.NewQuantity(int64(5), resource.BinarySI)}},
}
// Enable alpha feature LocalStorageCapacityIsolation
err := utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=true")
if err != nil {
t.Errorf("Failed to enable feature gate for LocalStorageCapacityIsolation: %v", err)
return
}
for _, tc := range testCases {
if errs := validateVolumeSource(&tc, field.NewPath("spec")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
// Disable alpha feature LocalStorageCapacityIsolation
err = utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=false")
if err != nil {
t.Errorf("Failed to disable feature gate for LocalStorageCapacityIsolation: %v", err)
return
}
for _, tc := range testCases {
if errs := validateVolumeSource(&tc, field.NewPath("spec")); len(errs) == 0 {
t.Errorf("expected failure: %v", errs)
}
}
containerLimitCase := api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceStorageOverlay: *resource.NewMilliQuantity(
int64(40000),
resource.BinarySI),
},
}
// Enable alpha feature LocalStorageCapacityIsolation
err = utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=true")
if err != nil {
t.Errorf("Failed to enable feature gate for LocalStorageCapacityIsolation: %v", err)
return
}
if errs := ValidateResourceRequirements(&containerLimitCase, field.NewPath("resources")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
// Disable alpha feature LocalStorageCapacityIsolation
err = utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=false")
if err != nil {
t.Errorf("Failed to disable feature gate for LocalStorageCapacityIsolation: %v", err)
return
}
if errs := ValidateResourceRequirements(&containerLimitCase, field.NewPath("resources")); len(errs) == 0 {
t.Errorf("expected failure: %v", errs)
}
}
func TestValidatePorts(t *testing.T) {
successCase := []api.ContainerPort{
{Name: "abc", ContainerPort: 80, HostPort: 80, Protocol: "TCP"},


@@ -94,6 +94,12 @@ const (
//
// A new volume type that supports local disks on a node.
PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes"
// owner: @jinxu
// alpha: v1.7
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation"
)
func init() {
@@ -114,6 +120,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
PersistentLocalVolumes: {Default: false, PreRelease: utilfeature.Alpha},
LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
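
Code paths added for this feature follow the same guard pattern seen in the validation change above; a minimal sketch (helper name hypothetical), with the gate toggled cluster-wide via the component flag --feature-gates=LocalStorageCapacityIsolation=true:

// Hypothetical helper: alpha behavior stays dormant unless the gate is on.
func localStorageIsolationEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation)
}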