k3s-io/kubernetes (mirror of https://github.com/k3s-io/kubernetes.git)

Merge pull request #20446 from derekwaynecarr/quota_scopes

Auto commit by PR queue bot

commit 6f8a951f87
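For orientation: this change adds an optional `scopes` field to `ResourceQuotaSpec`, so a quota can be restricted to a subset of objects (for example, only terminating or only best-effort pods). A minimal sketch of what a scoped quota object looks like, built with the internal API types touched by this diff (object construction only; submitting it to the API server is outside this excerpt):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// A quota that only applies to terminating pods (those with
	// spec.activeDeadlineSeconds set), capping their aggregate CPU.
	quota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "terminating-compute", Namespace: "demo"},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU:       resource.MustParse("10"),
				api.ResourceLimitsCPU: resource.MustParse("20"),
			},
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating},
		},
	}
	fmt.Printf("hard: %v, scopes: %v\n", quota.Spec.Hard, quota.Spec.Scopes)
}
```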
@@ -17515,9 +17515,20 @@
"hard": {
"type": "any",
"description": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota"
},
"scopes": {
"type": "array",
"items": {
"$ref": "v1.ResourceQuotaScope"
},
"description": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects."
}
}
},
"v1.ResourceQuotaScope": {
"id": "v1.ResourceQuotaScope",
"properties": {}
},
"v1.ResourceQuotaStatus": {
"id": "v1.ResourceQuotaStatus",
"description": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
@@ -60,6 +60,7 @@ import (
servicecontroller "k8s.io/kubernetes/pkg/controller/service"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/healthz"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/configz"
@@ -226,9 +227,23 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
}

go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
resourceQuotaControllerClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller"))
resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
groupKindsToReplenish := []unversioned.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
api.Kind("Secret"),
}
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
KubeClient: resourceQuotaControllerClient,
ResyncPeriod: controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
Registry: resourceQuotaRegistry,
GroupKindsToReplenish: groupKindsToReplenish,
ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactory(resourceQuotaControllerClient),
}
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -28,6 +28,7 @@ import (
kubecontrollermanager "k8s.io/kubernetes/cmd/kube-controller-manager/app"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/contrib/mesos/pkg/node"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
@@ -53,6 +54,7 @@ import (
servicecontroller "k8s.io/kubernetes/pkg/controller/service"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/healthz"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait"
@@ -173,8 +175,23 @@ func (s *CMServer) Run(_ []string) error {
routeController.Run(s.NodeSyncPeriod.Duration)
}

go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
resourceQuotaControllerClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller"))
resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
groupKindsToReplenish := []unversioned.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
api.Kind("Secret"),
}
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
KubeClient: resourceQuotaControllerClient,
ResyncPeriod: controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
Registry: resourceQuotaRegistry,
GroupKindsToReplenish: groupKindsToReplenish,
ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactory(resourceQuotaControllerClient),
}
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -22,7 +22,6 @@ import (

"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/controller/resourcequota"
)

type resources struct {
@@ -81,7 +80,6 @@ func TestResources(tst *testing.T) {
}

tst.Logf("Testing resource computation for %v => request=%v limit=%v", t, pod.Spec.Containers[0].Resources.Requests, pod.Spec.Containers[0].Resources.Limits)
tst.Logf("hasRequests: cpu => %v, mem => %v", resourcequota.PodHasRequests(pod, api.ResourceCPU), resourcequota.PodHasRequests(pod, api.ResourceMemory))

beforeCpuR, beforeCpuL, _, err := LimitedCPUForPod(pod, DefaultDefaultContainerCPULimit)
assert.NoError(err, "CPUForPod should not return an error")
@@ -5591,7 +5591,7 @@ Populated by the system when a graceful deletion is requested. Read-only. More i
</div>
<div id="footer">
<div id="footer-text">
Last updated 2016-02-26 21:38:01 UTC
Last updated 2016-02-27 01:28:36 UTC
</div>
</div>
</body>

@@ -1064,6 +1064,13 @@ Examples:<br>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_any">any</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">scopes</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_resourcequotascope">v1.ResourceQuotaScope</a> array</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

@@ -6188,6 +6195,10 @@ The resulting set of endpoints can be viewed as:<br>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_resourcequotascope">v1.ResourceQuotaScope</h3>

</div>
<div class="sect2">
<h3 id="_v1_replicationcontrollerlist">v1.ReplicationControllerList</h3>
@@ -7477,7 +7488,7 @@ The resulting set of endpoints can be viewed as:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2016-02-26 21:37:52 UTC
Last updated 2016-02-27 01:28:28 UTC
</div>
</div>
</body>
@@ -112,11 +112,11 @@ Support the following resources that can be tracked by quota.
| Resource Name | Description |
| ------------- | ----------- |
| cpu | total cpu requests (backwards compatibility) |
| cpu.request | total cpu requests |
| cpu.limit | total cpu limits |
| memory | total memory requests (backwards compatibility) |
| memory.request | total memory requests |
| memory.limit | total memory limits |
| requests.cpu | total cpu requests |
| requests.memory | total memory requests |
| limits.cpu | total cpu limits |
| limits.memory | total memory limits |

### Resource Quota Scopes

@@ -145,22 +145,22 @@ A `Terminating`, `NotTerminating`, `NotBestEffort` scope restricts a quota to
tracking the following resources:

* pod
* memory, memory.request, memory.limit
* cpu, cpu.request, cpu.limit
* memory, requests.memory, limits.memory
* cpu, requests.cpu, limits.cpu

## Data Model Impact

```
// The following identify resource constants for Kubernetes object types
const (
// CPU Request, in cores
ResourceCPURequest ResourceName = "cpu.request"
// CPU Limit, in bytes
ResourceCPULimit ResourceName = "cpu.limit"
// Memory Request, in bytes
ResourceMemoryRequest ResourceName = "memory.request"
// Memory Limit, in bytes
ResourceMemoryLimit ResourceName = "memory.limit"
// CPU request, in cores. (500m = .5 cores)
ResourceRequestsCPU ResourceName = "requests.cpu"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsMemory ResourceName = "requests.memory"
// CPU limit, in cores. (500m = .5 cores)
ResourceLimitsCPU ResourceName = "limits.cpu"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsMemory ResourceName = "limits.memory"
)

// A scope is a filter that matches an object
@@ -178,8 +178,8 @@ const (
type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource
Hard ResourceList `json:"hard,omitempty"`
// Scopes is the set of filters that must match an object for it to be
// tracked by the quota
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
Scopes []ResourceQuotaScope `json:"scopes,omitempty"`
}
```
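Read together with the scope list above, the matching semantics can be sketched as a simple predicate over a pod. This is an illustrative, hypothetical helper only (the real evaluation lives in the quota evaluator packages, which are not part of this excerpt); best-effort classification is passed in as a flag because QoS derivation is out of scope here:

```go
package quotascopes

import "k8s.io/kubernetes/pkg/api"

// podMatchesScope sketches the filter semantics of each ResourceQuotaScope:
// Terminating pods are those with spec.activeDeadlineSeconds set, and the
// BestEffort pair keys off the pod's quality of service.
func podMatchesScope(scope api.ResourceQuotaScope, pod *api.Pod, isBestEffort bool) bool {
	terminating := pod.Spec.ActiveDeadlineSeconds != nil
	switch scope {
	case api.ResourceQuotaScopeTerminating:
		return terminating
	case api.ResourceQuotaScopeNotTerminating:
		return !terminating
	case api.ResourceQuotaScopeBestEffort:
		return isBestEffort
	case api.ResourceQuotaScopeNotBestEffort:
		return !isBestEffort
	default:
		return false
	}
}
```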
@@ -2455,6 +2455,15 @@ func DeepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec
} else {
out.Hard = nil
}
if in.Scopes != nil {
in, out := in.Scopes, &out.Scopes
*out = make([]ResourceQuotaScope, len(in))
for i := range in {
(*out)[i] = in[i]
}
} else {
out.Scopes = nil
}
return nil
}
@@ -79,9 +79,82 @@ var Semantic = conversion.EqualitiesOrDie(
},
)

var standardResourceQuotaScopes = sets.NewString(
string(ResourceQuotaScopeTerminating),
string(ResourceQuotaScopeNotTerminating),
string(ResourceQuotaScopeBestEffort),
string(ResourceQuotaScopeNotBestEffort),
)

// IsStandardResourceQuotaScope returns true if the scope is a standard value
func IsStandardResourceQuotaScope(str string) bool {
return standardResourceQuotaScopes.Has(str)
}

var podObjectCountQuotaResources = sets.NewString(
string(ResourcePods),
)

var podComputeQuotaResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
)

// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool {
switch scope {
case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort:
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
case ResourceQuotaScopeBestEffort:
return podObjectCountQuotaResources.Has(resource)
default:
return true
}
}

var standardContainerResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
)

// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str)
}

var standardQuotaResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourcePods),
string(ResourceQuotas),
string(ResourceServices),
string(ResourceReplicationControllers),
string(ResourceSecrets),
string(ResourcePersistentVolumeClaims),
)

// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(str string) bool {
return standardQuotaResources.Has(str)
}

var standardResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourcePods),
string(ResourceQuotas),
string(ResourceServices),
@@ -44187,6 +44187,32 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x))
}
}

func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
*((*string)(x)) = r.DecodeString()
}
}

func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@@ -44201,13 +44227,14 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [1]bool
var yyq2 [2]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = len(x.Hard) != 0
yyq2[1] = len(x.Scopes) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(1)
r.EncodeArrayStart(2)
} else {
yynn2 = 0
for _, b := range yyq2 {
@@ -44241,6 +44268,39 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[1] {
if x.Scopes == nil {
r.EncodeNil()
} else {
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("scopes"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Scopes == nil {
r.EncodeNil()
} else {
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@@ -44309,6 +44369,18 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
yyv4 := &x.Hard
yyv4.CodecDecodeSelf(d)
}
case "scopes":
if r.TryDecodeAsNil() {
x.Scopes = nil
} else {
yyv5 := &x.Scopes
yym6 := z.DecBinary()
_ = yym6
if false {
} else {
h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d)
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@@ -44320,16 +44392,16 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj5 int
var yyb5 bool
var yyhl5 bool = l >= 0
yyj5++
if yyhl5 {
yyb5 = yyj5 > l
var yyj7 int
var yyb7 bool
var yyhl7 bool = l >= 0
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb5 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb5 {
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -44337,21 +44409,43 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
if r.TryDecodeAsNil() {
x.Hard = nil
} else {
yyv6 := &x.Hard
yyv6.CodecDecodeSelf(d)
yyv8 := &x.Hard
yyv8.CodecDecodeSelf(d)
}
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb7 = r.CheckBreak()
}
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Scopes = nil
} else {
yyv9 := &x.Scopes
yym10 := z.DecBinary()
_ = yym10
if false {
} else {
h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d)
}
}
for {
yyj5++
if yyhl5 {
yyb5 = yyj5 > l
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb5 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb5 {
if yyb7 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj5-1, "")
z.DecStructFieldNotFound(yyj7-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
@@ -53768,6 +53862,116 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode
}
}

func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv1 := range v {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yyv1.CodecEncodeSelf(e)
}
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r

yyv1 := *v
yyh1, yyl1 := z.DecSliceHelperStart()
var yyc1 bool
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []ResourceQuotaScope{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
yyc1 = true
}
} else if yyl1 > 0 {
var yyrr1, yyrl1 int
var yyrt1 bool
_, _ = yyrl1, yyrt1
yyrr1 = yyl1 // len(yyv1)
if yyl1 > cap(yyv1) {

yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]ResourceQuotaScope, yyrl1)
}
} else {
yyv1 = make([]ResourceQuotaScope, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
yyj1 := 0
for ; yyj1 < yyrr1; yyj1++ {
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

}
if yyrt1 {
for ; yyj1 < yyl1; yyj1++ {
yyv1 = append(yyv1, "")
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

}
}

} else {
yyj1 := 0
for ; !r.CheckBreak(); yyj1++ {

if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
if yyj1 < len(yyv1) {
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

} else {
z.DecSwallow()
}

}
if yyj1 < len(yyv1) {
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []ResourceQuotaScope{}
yyc1 = true
}
}
yyh1.End()
if yyc1 {
*v = yyv1
}
}

func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@@ -53807,7 +54011,7 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.

yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
@@ -2189,12 +2189,37 @@ const (
ResourceSecrets ResourceName = "secrets"
// ResourcePersistentVolumeClaims, number
ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
// CPU request, in cores. (500m = .5 cores)
ResourceRequestsCPU ResourceName = "requests.cpu"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsMemory ResourceName = "requests.memory"
// CPU limit, in cores. (500m = .5 cores)
ResourceLimitsCPU ResourceName = "limits.cpu"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsMemory ResourceName = "limits.memory"
)

// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string

const (
// Match all pod objects where spec.activeDeadlineSeconds
ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
// Match all pod objects where !spec.activeDeadlineSeconds
ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
// Match all pod objects that have best effort quality of service
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// Match all pod objects that do not have best effort quality of service
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)

// ResourceQuotaSpec defines the desired hard limits to enforce for Quota
type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource
Hard ResourceList `json:"hard,omitempty"`
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
Scopes []ResourceQuotaScope `json:"scopes,omitempty"`
}

// ResourceQuotaStatus defines the enforced hard limits and observed use
@@ -2666,6 +2666,14 @@ func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQ
} else {
out.Hard = nil
}
if in.Scopes != nil {
out.Scopes = make([]ResourceQuotaScope, len(in.Scopes))
for i := range in.Scopes {
out.Scopes[i] = ResourceQuotaScope(in.Scopes[i])
}
} else {
out.Scopes = nil
}
return nil
}

@@ -5887,6 +5895,14 @@ func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuota
if err := s.Convert(&in.Hard, &out.Hard, 0); err != nil {
return err
}
if in.Scopes != nil {
out.Scopes = make([]api.ResourceQuotaScope, len(in.Scopes))
for i := range in.Scopes {
out.Scopes[i] = api.ResourceQuotaScope(in.Scopes[i])
}
} else {
out.Scopes = nil
}
return nil
}
@@ -2084,6 +2084,14 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec,
} else {
out.Hard = nil
}
if in.Scopes != nil {
out.Scopes = make([]ResourceQuotaScope, len(in.Scopes))
for i := range in.Scopes {
out.Scopes[i] = in.Scopes[i]
}
} else {
out.Scopes = nil
}
return nil
}
@@ -43999,6 +43999,32 @@ func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x))
}
}

func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
*((*string)(x)) = r.DecodeString()
}
}

func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@@ -44013,13 +44039,14 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [1]bool
var yyq2 [2]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = len(x.Hard) != 0
yyq2[1] = len(x.Scopes) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(1)
r.EncodeArrayStart(2)
} else {
yynn2 = 0
for _, b := range yyq2 {
@@ -44053,6 +44080,39 @@ func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[1] {
if x.Scopes == nil {
r.EncodeNil()
} else {
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("scopes"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Scopes == nil {
r.EncodeNil()
} else {
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@@ -44121,6 +44181,18 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
yyv4 := &x.Hard
yyv4.CodecDecodeSelf(d)
}
case "scopes":
if r.TryDecodeAsNil() {
x.Scopes = nil
} else {
yyv5 := &x.Scopes
yym6 := z.DecBinary()
_ = yym6
if false {
} else {
h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d)
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@@ -44132,16 +44204,16 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj5 int
var yyb5 bool
var yyhl5 bool = l >= 0
yyj5++
if yyhl5 {
yyb5 = yyj5 > l
var yyj7 int
var yyb7 bool
var yyhl7 bool = l >= 0
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb5 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb5 {
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -44149,21 +44221,43 @@ func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
if r.TryDecodeAsNil() {
x.Hard = nil
} else {
yyv6 := &x.Hard
yyv6.CodecDecodeSelf(d)
yyv8 := &x.Hard
yyv8.CodecDecodeSelf(d)
}
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb7 = r.CheckBreak()
}
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Scopes = nil
} else {
yyv9 := &x.Scopes
yym10 := z.DecBinary()
_ = yym10
if false {
} else {
h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d)
}
}
for {
yyj5++
if yyhl5 {
yyb5 = yyj5 > l
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb5 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb5 {
if yyb7 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj5-1, "")
z.DecStructFieldNotFound(yyj7-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
@@ -53826,6 +53920,116 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode
}
}

func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv1 := range v {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yyv1.CodecEncodeSelf(e)
}
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r

yyv1 := *v
yyh1, yyl1 := z.DecSliceHelperStart()
var yyc1 bool
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []ResourceQuotaScope{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
yyc1 = true
}
} else if yyl1 > 0 {
var yyrr1, yyrl1 int
var yyrt1 bool
_, _ = yyrl1, yyrt1
yyrr1 = yyl1 // len(yyv1)
if yyl1 > cap(yyv1) {

yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]ResourceQuotaScope, yyrl1)
}
} else {
yyv1 = make([]ResourceQuotaScope, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
yyj1 := 0
for ; yyj1 < yyrr1; yyj1++ {
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

}
if yyrt1 {
for ; yyj1 < yyl1; yyj1++ {
yyv1 = append(yyv1, "")
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

}
}

} else {
yyj1 := 0
for ; !r.CheckBreak(); yyj1++ {

if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
if yyj1 < len(yyv1) {
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
}

} else {
z.DecSwallow()
}

}
if yyj1 < len(yyv1) {
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []ResourceQuotaScope{}
yyc1 = true
}
}
yyh1.End()
if yyc1 {
*v = yyv1
}
}

func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@@ -53865,7 +54069,7 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.

yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
@@ -2645,6 +2645,28 @@ const (
ResourceSecrets ResourceName = "secrets"
// ResourcePersistentVolumeClaims, number
ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
// CPU request, in cores. (500m = .5 cores)
ResourceCPURequest ResourceName = "cpu.request"
// CPU limit, in cores. (500m = .5 cores)
ResourceCPULimit ResourceName = "cpu.limit"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemoryRequest ResourceName = "memory.request"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemoryLimit ResourceName = "memory.limit"
)

// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string

const (
// Match all pod objects where spec.activeDeadlineSeconds
ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
// Match all pod objects where !spec.activeDeadlineSeconds
ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
// Match all pod objects that have best effort quality of service
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// Match all pod objects that do not have best effort quality of service
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)

// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
@@ -2652,6 +2674,9 @@ type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource.
// More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
Hard ResourceList `json:"hard,omitempty"`
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
Scopes []ResourceQuotaScope `json:"scopes,omitempty"`
}

// ResourceQuotaStatus defines the enforced hard limits and observed use.
@@ -1353,6 +1353,7 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string {
var map_ResourceQuotaSpec = map[string]string{
"": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
"hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
"scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
}

func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
@@ -44,6 +44,7 @@ import (
var RepairMalformedUpdates bool = true

const isNegativeErrorMsg string = `must be greater than or equal to 0`
const isInvalidQuotaResource string = `must be a standard resource for quota`
const fieldImmutableErrorMsg string = `field is immutable`
const cIdentifierErrorMsg string = `must be a C identifier (matching regex ` + validation.CIdentifierFmt + `): e.g. "my_name" or "MyName"`
const isNotIntegerErrorMsg string = `must be an integer`
@@ -1971,6 +1972,30 @@ func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
return field.ErrorList{}
}

// Validate container resource name
// Refer to docs/design/resources.md for more details.
func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(value, fldPath)
if len(strings.Split(value, "/")) == 1 {
if !api.IsStandardContainerResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
}
}
return field.ErrorList{}
}

// Validate resource names that can go in a resource quota
// Refer to docs/design/resources.md for more details.
func validateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(value, fldPath)
if len(strings.Split(value, "/")) == 1 {
if !api.IsStandardQuotaResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
}
}
return field.ErrorList{}
}

// ValidateLimitRange tests if required fields in the LimitRange are set.
func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))
@@ -1995,12 +2020,12 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
maxLimitRequestRatios := map[string]resource.Quantity{}

for k, q := range limit.Max {
allErrs = append(allErrs, validateResourceName(string(k), idxPath.Child("max").Key(string(k)))...)
allErrs = append(allErrs, validateContainerResourceName(string(k), idxPath.Child("max").Key(string(k)))...)
keys.Insert(string(k))
max[string(k)] = q
}
for k, q := range limit.Min {
allErrs = append(allErrs, validateResourceName(string(k), idxPath.Child("min").Key(string(k)))...)
allErrs = append(allErrs, validateContainerResourceName(string(k), idxPath.Child("min").Key(string(k)))...)
keys.Insert(string(k))
min[string(k)] = q
}
@@ -2014,19 +2039,19 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
}
} else {
for k, q := range limit.Default {
allErrs = append(allErrs, validateResourceName(string(k), idxPath.Child("default").Key(string(k)))...)
allErrs = append(allErrs, validateContainerResourceName(string(k), idxPath.Child("default").Key(string(k)))...)
keys.Insert(string(k))
defaults[string(k)] = q
}
for k, q := range limit.DefaultRequest {
allErrs = append(allErrs, validateResourceName(string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
allErrs = append(allErrs, validateContainerResourceName(string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
keys.Insert(string(k))
defaultRequests[string(k)] = q
}
}

for k, q := range limit.MaxLimitRequestRatio {
allErrs = append(allErrs, validateResourceName(string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
allErrs = append(allErrs, validateContainerResourceName(string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
keys.Insert(string(k))
maxLimitRequestRatios[string(k)] = q
}
@@ -2249,7 +2274,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat
for resourceName, quantity := range requirements.Limits {
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateResourceName(string(resourceName), fldPath)...)
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
if api.IsStandardResourceName(string(resourceName)) {
allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...)
}
@@ -2265,7 +2290,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateResourceName(string(resourceName), fldPath)...)
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
if api.IsStandardResourceName(string(resourceName)) {
allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...)
}
@@ -2273,6 +2298,41 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat
return allErrs
}

// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes
func validateResourceQuotaScopes(resourceQuota *api.ResourceQuota) field.ErrorList {
allErrs := field.ErrorList{}
if len(resourceQuota.Spec.Scopes) == 0 {
return allErrs
}
hardLimits := sets.NewString()
for k := range resourceQuota.Spec.Hard {
hardLimits.Insert(string(k))
}
fldPath := field.NewPath("spec", "scopes")
scopeSet := sets.NewString()
for _, scope := range resourceQuota.Spec.Scopes {
if !api.IsStandardResourceQuotaScope(string(scope)) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "unsupported scope"))
}
for _, k := range hardLimits.List() {
if api.IsStandardQuotaResourceName(k) && !api.IsResourceQuotaScopeValidForResource(scope, k) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "unsupported scope applied to resource"))
}
}
scopeSet.Insert(string(scope))
}
invalidScopePairs := []sets.String{
sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)),
sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)),
}
for _, invalidScopePair := range invalidScopePairs {
if scopeSet.HasAll(invalidScopePair.List()...) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "conflicting scopes"))
}
}
return allErrs
}

// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList {
allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
@@ -2280,21 +2340,24 @@ func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList {
fldPath := field.NewPath("spec", "hard")
for k, v := range resourceQuota.Spec.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}
allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuota)...)

fldPath = field.NewPath("status", "hard")
for k, v := range resourceQuota.Status.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}
fldPath = field.NewPath("status", "used")
for k, v := range resourceQuota.Status.Used {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}

return allErrs
}

@@ -2317,9 +2380,25 @@ func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.Resourc
fldPath := field.NewPath("spec", "hard")
for k, v := range newResourceQuota.Spec.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}

// ensure scopes cannot change, and that resources are still valid for scope
fldPath = field.NewPath("spec", "scopes")
oldScopes := sets.NewString()
newScopes := sets.NewString()
for _, scope := range newResourceQuota.Spec.Scopes {
newScopes.Insert(string(scope))
}
for _, scope := range oldResourceQuota.Spec.Scopes {
oldScopes.Insert(string(scope))
}
if !oldScopes.Equal(newScopes) {
allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, "field is immutable"))
}
allErrs = append(allErrs, validateResourceQuotaScopes(newResourceQuota)...)

newResourceQuota.Status = oldResourceQuota.Status
return allErrs
}
@@ -2334,13 +2413,13 @@ func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.R
fldPath := field.NewPath("status", "hard")
for k, v := range newResourceQuota.Status.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}
fldPath = field.NewPath("status", "used")
for k, v := range newResourceQuota.Status.Used {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, validateResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...)
allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...)
}
newResourceQuota.Spec = oldResourceQuota.Spec
@@ -4044,6 +4044,10 @@ func TestValidateResourceQuota(t *testing.T) {
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
api.ResourceMemory: resource.MustParse("10000"),
api.ResourceRequestsCPU: resource.MustParse("100"),
api.ResourceRequestsMemory: resource.MustParse("10000"),
api.ResourceLimitsCPU: resource.MustParse("100"),
api.ResourceLimitsMemory: resource.MustParse("10000"),
api.ResourcePods: resource.MustParse("10"),
api.ResourceServices: resource.MustParse("0"),
api.ResourceReplicationControllers: resource.MustParse("10"),
@@ -4051,6 +4055,42 @@ func TestValidateResourceQuota(t *testing.T) {
},
}

terminatingSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
api.ResourceLimitsCPU: resource.MustParse("200"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating},
}

nonTerminatingSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating},
}

bestEffortSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourcePods: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
}

nonBestEffortSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort},
}

// storage is not yet supported as a quota tracked resource
invalidQuotaResourceSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceStorage: resource.MustParse("10"),
},
}

negativeSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("-100"),
@@ -4077,6 +4117,27 @@ func TestValidateResourceQuota(t *testing.T) {
},
}

invalidTerminatingScopePairsSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating},
}

invalidBestEffortScopePairsSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourcePods: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort, api.ResourceQuotaScopeNotBestEffort},
}

invalidScopeNameSpec := api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("100"),
},
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScope("foo")},
}

successCases := []api.ResourceQuota{
{
ObjectMeta: api.ObjectMeta{
@@ -4092,6 +4153,34 @@ func TestValidateResourceQuota(t *testing.T) {
},
Spec: fractionalComputeSpec,
},
{
ObjectMeta: api.ObjectMeta{
Name: "abc",
Namespace: "foo",
},
Spec: terminatingSpec,
},
{
ObjectMeta: api.ObjectMeta{
Name: "abc",
Namespace: "foo",
},
Spec: nonTerminatingSpec,
},
{
ObjectMeta: api.ObjectMeta{
Name: "abc",
Namespace: "foo",
},
Spec: bestEffortSpec,
},
{
ObjectMeta: api.ObjectMeta{
Name: "abc",
Namespace: "foo",
},
Spec: nonBestEffortSpec,
},
}

for _, successCase := range successCases {
@@ -4128,6 +4217,22 @@ func TestValidateResourceQuota(t *testing.T) {
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: fractionalPodSpec},
isNotIntegerErrorMsg,
},
"invalid-quota-resource": {
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidQuotaResourceSpec},
isInvalidQuotaResource,
},
"invalid-quota-terminating-pair": {
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidTerminatingScopePairsSpec},
"conflicting scopes",
},
"invalid-quota-besteffort-pair": {
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidBestEffortScopePairsSpec},
"conflicting scopes",
},
"invalid-quota-scope-name": {
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidScopeNameSpec},
"unsupported scope",
},
}
for k, v := range errorCases {
errs := ValidateResourceQuota(&v.R)
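The new error cases above reduce to a simple pattern. As a hypothetical standalone check (names and structure here are illustrative; only `ValidateResourceQuota` and the API types come from this diff):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/validation"
)

func main() {
	// Mirrors the "invalid-quota-terminating-pair" case: a quota may not carry
	// both the Terminating and NotTerminating scopes at once.
	quota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"},
		Spec: api.ResourceQuotaSpec{
			Hard:   api.ResourceList{api.ResourceCPU: resource.MustParse("100")},
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating},
		},
	}
	errs := validation.ValidateResourceQuota(&quota)
	fmt.Printf("expected a 'conflicting scopes' error, got %d error(s)\n", len(errs))
}
```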
pkg/controller/resourcequota/replenishment_controller.go (new file, 194 lines)
@@ -0,0 +1,194 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourcequota
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/quota/evaluator/core"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
// ReplenishmentFunc is a function that is invoked when controller sees a change
|
||||
// that may require a quota to be replenished (i.e. object deletion, or object moved to terminal state)
|
||||
type ReplenishmentFunc func(groupKind unversioned.GroupKind, namespace string, object runtime.Object)
|
||||
|
||||
// ReplenishmentControllerOptions is an options struct that tells a factory
|
||||
// how to configure a controller that can inform the quota system it should
|
||||
// replenish quota
|
||||
type ReplenishmentControllerOptions struct {
|
||||
// The kind monitored for replenishment
|
||||
GroupKind unversioned.GroupKind
|
||||
// The period that should be used to re-sync the monitored resource
|
||||
ResyncPeriod controller.ResyncPeriodFunc
|
||||
// The function to invoke when a change is observed that should trigger
|
||||
// replenishment
|
||||
ReplenishmentFunc ReplenishmentFunc
|
||||
}
|
||||
|
||||
// PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new is not
|
||||
func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
|
||||
return func(oldObj, newObj interface{}) {
|
||||
oldPod := oldObj.(*api.Pod)
|
||||
newPod := newObj.(*api.Pod)
|
||||
if core.QuotaPod(oldPod) && !core.QuotaPod(newPod) {
|
||||
options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, newPod)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ObjectReplenishmentDeleteFunc will replenish on every delete
|
||||
func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func(obj interface{}) {
|
||||
return func(obj interface{}) {
|
||||
metaObject, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Errorf("replenishment controller could not get object from tombstone %+v, could take up to %v before quota is replenished", obj, options.ResyncPeriod())
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
metaObject, err = meta.Accessor(tombstone.Obj)
|
||||
if err != nil {
|
||||
glog.Errorf("replenishment controller tombstone contained object that is not a meta %+v, could take up to %v before quota is replenished", tombstone.Obj, options.ResyncPeriod())
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
options.ReplenishmentFunc(options.GroupKind, metaObject.GetNamespace(), nil)
|
||||
}
|
||||
}
|
||||
|
||||
// ReplenishmentControllerFactory knows how to build replenishment controllers
|
||||
type ReplenishmentControllerFactory interface {
|
||||
// NewController returns a controller configured with the specified options
|
||||
NewController(options *ReplenishmentControllerOptions) (*framework.Controller, error)
|
||||
}
|
||||
|
||||
// replenishmentControllerFactory implements ReplenishmentControllerFactory
|
||||
type replenishmentControllerFactory struct {
|
||||
kubeClient clientset.Interface
|
||||
}
|
||||
|
||||
// NewReplenishmentControllerFactory returns a factory that knows how to build controllers
|
||||
// to replenish resources when updated or deleted
|
||||
func NewReplenishmentControllerFactory(kubeClient clientset.Interface) ReplenishmentControllerFactory {
|
||||
return &replenishmentControllerFactory{
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (*framework.Controller, error) {
|
||||
var result *framework.Controller
|
||||
switch options.GroupKind {
|
||||
case api.Kind("Pod"):
|
||||
_, result = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Pods(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Pod{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: PodReplenishmentUpdateFunc(options),
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("Service"):
|
||||
_, result = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().Services(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Service{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("ReplicationController"):
|
||||
_, result = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ReplicationController{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("PersistentVolumeClaim"):
|
||||
_, result = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.PersistentVolumeClaim{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("Secret"):
|
||||
_, result = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().Secrets(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Secret{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
default:
|
||||
return nil, fmt.Errorf("no replenishment controller available for %s", options.GroupKind)
|
||||
}
|
||||
return result, nil
|
||||
}
|
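For orientation, a minimal sketch of how a caller could wire up the factory defined above. It assumes an existing clientset.Interface and a stop channel, and the hypothetical startPodReplenishment wrapper plus the logging callback are illustrative only; every other identifier comes from the file above.

// startPodReplenishment is a hypothetical helper showing the factory in use (sketch only).
func startPodReplenishment(kubeClient clientset.Interface, stopCh <-chan struct{}) error {
	factory := NewReplenishmentControllerFactory(kubeClient)
	podController, err := factory.NewController(&ReplenishmentControllerOptions{
		GroupKind:    api.Kind("Pod"),
		ResyncPeriod: controller.NoResyncPeriodFunc,
		ReplenishmentFunc: func(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
			// a real caller would enqueue the matching quotas for this namespace here
			glog.V(4).Infof("replenishment observed for %s in namespace %s", groupKind, namespace)
		},
	})
	if err != nil {
		return err
	}
	// the informer-backed controller runs until the stop channel closes
	go podController.Run(stopCh)
	return nil
}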
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourcequota
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// testReplenishment lets us test replenishment functions are invoked
|
||||
type testReplenishment struct {
|
||||
groupKind unversioned.GroupKind
|
||||
namespace string
|
||||
}
|
||||
|
||||
// mock function that holds onto the last kind that was replenished
|
||||
func (t *testReplenishment) Replenish(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
|
||||
t.groupKind = groupKind
|
||||
t.namespace = namespace
|
||||
}
|
||||
|
||||
func TestPodReplenishmentUpdateFunc(t *testing.T) {
|
||||
mockReplenish := &testReplenishment{}
|
||||
options := ReplenishmentControllerOptions{
|
||||
GroupKind: api.Kind("Pod"),
|
||||
ReplenishmentFunc: mockReplenish.Replenish,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
}
|
||||
oldPod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
|
||||
Status: api.PodStatus{Phase: api.PodRunning},
|
||||
}
|
||||
newPod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
}
|
||||
updateFunc := PodReplenishmentUpdateFunc(&options)
|
||||
updateFunc(oldPod, newPod)
|
||||
if mockReplenish.groupKind != api.Kind("Pod") {
|
||||
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
|
||||
}
|
||||
if mockReplenish.namespace != oldPod.Namespace {
|
||||
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectReplenishmentDeleteFunc(t *testing.T) {
|
||||
mockReplenish := &testReplenishment{}
|
||||
options := ReplenishmentControllerOptions{
|
||||
GroupKind: api.Kind("Pod"),
|
||||
ReplenishmentFunc: mockReplenish.Replenish,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
}
|
||||
oldPod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
|
||||
Status: api.PodStatus{Phase: api.PodRunning},
|
||||
}
|
||||
deleteFunc := ObjectReplenishmentDeleteFunc(&options)
|
||||
deleteFunc(oldPod)
|
||||
if mockReplenish.groupKind != api.Kind("Pod") {
|
||||
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
|
||||
}
|
||||
if mockReplenish.namespace != oldPod.Namespace {
|
||||
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
|
||||
}
|
||||
}
|
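The tests above cover the plain update and delete paths. A sketch of how the tombstone branch of ObjectReplenishmentDeleteFunc could also be exercised, assuming the test file additionally imports k8s.io/kubernetes/pkg/client/cache; the Key field on cache.DeletedFinalStateUnknown is an assumption here, since the controller code above only reads Obj.

func TestObjectReplenishmentDeleteFuncTombstone(t *testing.T) {
	mockReplenish := &testReplenishment{}
	options := ReplenishmentControllerOptions{
		GroupKind:         api.Kind("Pod"),
		ReplenishmentFunc: mockReplenish.Replenish,
		ResyncPeriod:      controller.NoResyncPeriodFunc,
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}}
	// simulate the informer handing the handler a tombstone after a missed delete event
	tombstone := cache.DeletedFinalStateUnknown{Key: "test/pod", Obj: pod}
	deleteFunc := ObjectReplenishmentDeleteFunc(&options)
	deleteFunc(tombstone)
	if mockReplenish.groupKind != api.Kind("Pod") {
		t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
	}
	if mockReplenish.namespace != pod.Namespace {
		t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
	}
}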
@ -17,16 +17,17 @@ limitations under the License.
|
||||
package resourcequota
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
@ -34,6 +35,21 @@ import (
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
// ResourceQuotaControllerOptions holds options for creating a quota controller
|
||||
type ResourceQuotaControllerOptions struct {
|
||||
// Must have authority to list all quotas, and update quota status
|
||||
KubeClient clientset.Interface
|
||||
// Controls full recalculation of quota usage
|
||||
ResyncPeriod controller.ResyncPeriodFunc
|
||||
// Knows how to calculate usage
|
||||
Registry quota.Registry
|
||||
// Knows how to build controllers that notify replenishment events
|
||||
ControllerFactory ReplenishmentControllerFactory
|
||||
// List of GroupKind objects that should be monitored for replenishment at
|
||||
// a faster frequency than the quota controller recalculation interval
|
||||
GroupKindsToReplenish []unversioned.GroupKind
|
||||
}
|
||||
|
||||
// ResourceQuotaController is responsible for tracking quota usage status in the system
|
||||
type ResourceQuotaController struct {
|
||||
// Must have authority to list all resources in the system, and update quota status
|
||||
@ -42,27 +58,32 @@ type ResourceQuotaController struct {
|
||||
rqIndexer cache.Indexer
|
||||
// Watches changes to all resource quota
|
||||
rqController *framework.Controller
|
||||
// A store of pods, populated by the podController
|
||||
podStore cache.StoreToPodLister
|
||||
// Watches changes to all pods (so we can optimize release of compute resources)
|
||||
podController *framework.Controller
|
||||
// ResourceQuota objects that need to be synchronized
|
||||
queue *workqueue.Type
|
||||
// To allow injection of syncUsage for testing.
|
||||
syncHandler func(key string) error
|
||||
// function that controls full recalculation of quota usage
|
||||
resyncPeriod controller.ResyncPeriodFunc
|
||||
// knows how to calculate usage
|
||||
registry quota.Registry
|
||||
// controllers monitoring to notify for replenishment
|
||||
replenishmentControllers []*framework.Controller
|
||||
}
|
||||
|
||||
// NewResourceQuotaController creates a new ResourceQuotaController
|
||||
func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *ResourceQuotaController {
|
||||
|
||||
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
|
||||
// build the resource quota controller
|
||||
rq := &ResourceQuotaController{
|
||||
kubeClient: kubeClient,
|
||||
kubeClient: options.KubeClient,
|
||||
queue: workqueue.New(),
|
||||
resyncPeriod: resyncPeriod,
|
||||
resyncPeriod: options.ResyncPeriod,
|
||||
registry: options.Registry,
|
||||
replenishmentControllers: []*framework.Controller{},
|
||||
}
|
||||
|
||||
// set the synchronization handler
|
||||
rq.syncHandler = rq.syncResourceQuotaFromKey
|
||||
|
||||
// build the controller that observes quota
|
||||
rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
@ -73,7 +94,7 @@ func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod con
|
||||
},
|
||||
},
|
||||
&api.ResourceQuota{},
|
||||
resyncPeriod(),
|
||||
rq.resyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: rq.enqueueResourceQuota,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
@ -87,10 +108,9 @@ func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod con
|
||||
// responsible for enqueue of all resource quotas when doing a full resync (enqueueAll)
|
||||
oldResourceQuota := old.(*api.ResourceQuota)
|
||||
curResourceQuota := cur.(*api.ResourceQuota)
|
||||
if api.Semantic.DeepEqual(oldResourceQuota.Spec.Hard, curResourceQuota.Status.Hard) {
|
||||
if quota.Equals(curResourceQuota.Spec.Hard, oldResourceQuota.Spec.Hard) {
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Observed updated quota spec for %v/%v", curResourceQuota.Namespace, curResourceQuota.Name)
|
||||
rq.enqueueResourceQuota(curResourceQuota)
|
||||
},
|
||||
// This will enter the sync loop and no-op, because the controller has been deleted from the store.
|
||||
@ -101,26 +121,19 @@ func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod con
|
||||
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
|
||||
)
|
||||
|
||||
// We use this pod controller to rapidly observe when a pod deletion occurs in order to
|
||||
// release compute resources from any associated quota.
|
||||
rq.podStore.Store, rq.podController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return rq.kubeClient.Core().Pods(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return rq.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Pod{},
|
||||
resyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: rq.deletePod,
|
||||
},
|
||||
)
|
||||
|
||||
// set the synchronization handler
|
||||
rq.syncHandler = rq.syncResourceQuotaFromKey
|
||||
for _, groupKindToReplenish := range options.GroupKindsToReplenish {
|
||||
controllerOptions := &ReplenishmentControllerOptions{
|
||||
GroupKind: groupKindToReplenish,
|
||||
ResyncPeriod: options.ResyncPeriod,
|
||||
ReplenishmentFunc: rq.replenishQuota,
|
||||
}
|
||||
replenishmentController, err := options.ControllerFactory.NewController(controllerOptions)
|
||||
if err != nil {
|
||||
glog.Warningf("quota controller unable to replenish %s due to %v, changes only accounted during full resync", groupKindToReplenish, err)
|
||||
} else {
|
||||
rq.replenishmentControllers = append(rq.replenishmentControllers, replenishmentController)
|
||||
}
|
||||
}
|
||||
return rq
|
||||
}
|
||||
|
||||
@ -155,6 +168,7 @@ func (rq *ResourceQuotaController) worker() {
|
||||
err := rq.syncHandler(key.(string))
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
rq.queue.Add(key)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -164,45 +178,21 @@ func (rq *ResourceQuotaController) worker() {
|
||||
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
go rq.rqController.Run(stopCh)
|
||||
go rq.podController.Run(stopCh)
|
||||
// the controllers that replenish other resources to respond rapidly to state changes
|
||||
for _, replenishmentController := range rq.replenishmentControllers {
|
||||
go replenishmentController.Run(stopCh)
|
||||
}
|
||||
// the workers that chug through the quota calculation backlog
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(rq.worker, time.Second, stopCh)
|
||||
}
|
||||
// the timer for how often we do a full recalculation across all quotas
|
||||
go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
|
||||
<-stopCh
|
||||
glog.Infof("Shutting down ResourceQuotaController")
|
||||
rq.queue.ShutDown()
|
||||
}
|
||||
|
||||
// FilterQuotaPods eliminates pods that no longer have a cost against the quota
|
||||
// pods that have a restart policy of always are always returned
|
||||
// pods that are in a failed state, but have a restart policy of on failure are always returned
|
||||
// pods that are not in a success state or a failure state are included in quota
|
||||
func FilterQuotaPods(pods []api.Pod) []*api.Pod {
|
||||
var result []*api.Pod
|
||||
for i := range pods {
|
||||
value := &pods[i]
|
||||
// a pod that has a restart policy always no matter its state counts against usage
|
||||
if value.Spec.RestartPolicy == api.RestartPolicyAlways {
|
||||
result = append(result, value)
|
||||
continue
|
||||
}
|
||||
// a failed pod with a restart policy of on failure will count against usage
|
||||
if api.PodFailed == value.Status.Phase &&
|
||||
value.Spec.RestartPolicy == api.RestartPolicyOnFailure {
|
||||
result = append(result, value)
|
||||
continue
|
||||
}
|
||||
// if the pod is not succeeded or failed, then we count it against quota
|
||||
if api.PodSucceeded != value.Status.Phase &&
|
||||
api.PodFailed != value.Status.Phase {
|
||||
result = append(result, value)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// syncResourceQuotaFromKey syncs a quota key
|
||||
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
|
||||
startTime := time.Now()
|
||||
@ -224,115 +214,68 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
|
||||
return rq.syncResourceQuota(quota)
|
||||
}
|
||||
|
||||
// syncResourceQuota runs a complete sync of current status
|
||||
func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (err error) {
|
||||
|
||||
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
|
||||
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQuota) (err error) {
|
||||
// quota is dirty if any part of spec hard limits differs from the status hard limits
|
||||
dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)
|
||||
dirty := !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
|
||||
|
||||
// dirty tracks if the usage status differs from the previous sync,
|
||||
// if so, we send a new usage with latest status
|
||||
// if this is our first sync, it will be dirty by default, since we need track usage
|
||||
dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)
|
||||
dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
|
||||
|
||||
// Create a usage object that is based on the quota resource version
|
||||
// Create a usage object that is based on the quota resource version that will handle updates
|
||||
// by default, we preserve the past usage observation, and set hard to the current spec
|
||||
previousUsed := api.ResourceList{}
|
||||
if resourceQuota.Status.Used != nil {
|
||||
previousUsed = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
|
||||
}
|
||||
usage := api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: quota.Name,
|
||||
Namespace: quota.Namespace,
|
||||
ResourceVersion: quota.ResourceVersion,
|
||||
Labels: quota.Labels,
|
||||
Annotations: quota.Annotations},
|
||||
Name: resourceQuota.Name,
|
||||
Namespace: resourceQuota.Namespace,
|
||||
ResourceVersion: resourceQuota.ResourceVersion,
|
||||
Labels: resourceQuota.Labels,
|
||||
Annotations: resourceQuota.Annotations},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
Hard: quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard),
|
||||
Used: previousUsed,
|
||||
},
|
||||
}
|
||||
|
||||
// set the hard values supported on the quota
|
||||
for k, v := range quota.Spec.Hard {
|
||||
usage.Status.Hard[k] = *v.Copy()
|
||||
}
|
||||
// set any last known observed status values for usage
|
||||
for k, v := range quota.Status.Used {
|
||||
usage.Status.Used[k] = *v.Copy()
|
||||
// find the intersection between the hard resources on the quota
|
||||
// and the resources this controller can track to know what we can
|
||||
// look to measure updated usage stats for
|
||||
hardResources := quota.ResourceNames(usage.Status.Hard)
|
||||
potentialResources := []api.ResourceName{}
|
||||
evaluators := rq.registry.Evaluators()
|
||||
for _, evaluator := range evaluators {
|
||||
potentialResources = append(potentialResources, evaluator.MatchesResources()...)
|
||||
}
|
||||
matchedResources := quota.Intersection(hardResources, potentialResources)
|
||||
|
||||
set := map[api.ResourceName]bool{}
|
||||
for k := range usage.Status.Hard {
|
||||
set[k] = true
|
||||
}
|
||||
|
||||
pods := &api.PodList{}
|
||||
if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
|
||||
pods, err = rq.kubeClient.Core().Pods(usage.Namespace).List(api.ListOptions{})
|
||||
// sum the observed usage from each evaluator
|
||||
newUsage := api.ResourceList{}
|
||||
usageStatsOptions := quota.UsageStatsOptions{Namespace: resourceQuota.Namespace, Scopes: resourceQuota.Spec.Scopes}
|
||||
for _, evaluator := range evaluators {
|
||||
stats, err := evaluator.UsageStats(usageStatsOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newUsage = quota.Add(newUsage, stats.Used)
|
||||
}
|
||||
|
||||
filteredPods := FilterQuotaPods(pods.Items)
|
||||
|
||||
// iterate over each resource, and update observation
|
||||
for k := range usage.Status.Hard {
|
||||
|
||||
// look if there is a used value, if none, we are definitely dirty
|
||||
prevQuantity, found := usage.Status.Used[k]
|
||||
if !found {
|
||||
dirty = true
|
||||
// mask the observed usage to only the set of resources tracked by this quota
|
||||
// merge our observed usage with the quota usage status
|
||||
// if the new usage is different than the last usage, we will need to do an update
|
||||
newUsage = quota.Mask(newUsage, matchedResources)
|
||||
for key, value := range newUsage {
|
||||
usage.Status.Used[key] = value
|
||||
}
|
||||
|
||||
var value *resource.Quantity
|
||||
dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)
|
||||
|
||||
switch k {
|
||||
case api.ResourcePods:
|
||||
value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
|
||||
case api.ResourceServices:
|
||||
items, err := rq.kubeClient.Core().Services(usage.Namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
|
||||
case api.ResourceReplicationControllers:
|
||||
items, err := rq.kubeClient.Core().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
|
||||
case api.ResourceQuotas:
|
||||
items, err := rq.kubeClient.Core().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
|
||||
case api.ResourceSecrets:
|
||||
items, err := rq.kubeClient.Core().Secrets(usage.Namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
|
||||
case api.ResourcePersistentVolumeClaims:
|
||||
items, err := rq.kubeClient.Core().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
|
||||
case api.ResourceMemory:
|
||||
value = PodsRequests(filteredPods, api.ResourceMemory)
|
||||
case api.ResourceCPU:
|
||||
value = PodsRequests(filteredPods, api.ResourceCPU)
|
||||
}
|
||||
|
||||
// ignore fields we do not understand (assume another controller is tracking it)
|
||||
if value != nil {
|
||||
// see if the value has changed
|
||||
dirty = dirty || (value.Value() != prevQuantity.Value())
|
||||
// just update the value
|
||||
usage.Status.Used[k] = *value
|
||||
}
|
||||
}
|
||||
|
||||
// update the usage only if it changed
|
||||
// there was a change observed by this controller that requires we update quota
|
||||
if dirty {
|
||||
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
|
||||
return err
|
||||
@ -340,102 +283,20 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
|
||||
return nil
|
||||
}
|
||||
|
||||
// PodsRequests returns sum of each resource request for each pod in list
|
||||
// If a given pod in the list does not have a request for the named resource, we log the error
|
||||
// but still attempt to get the most representative count
|
||||
func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity {
|
||||
var sum *resource.Quantity
|
||||
for i := range pods {
|
||||
pod := pods[i]
|
||||
podQuantity, err := PodRequests(pod, resourceName)
|
||||
// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
|
||||
func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
|
||||
// TODO: make this support targeted replenishment to a specific kind, right now it does a full replenish
|
||||
indexKey := &api.ResourceQuota{}
|
||||
indexKey.Namespace = namespace
|
||||
resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey)
|
||||
if err != nil {
|
||||
// log the error, but try to keep the most accurate count possible in log
|
||||
// rationale here is that you may have had pods in a namespace that did not have
|
||||
// explicit requests prior to adding the quota
|
||||
glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName)
|
||||
} else {
|
||||
if sum == nil {
|
||||
sum = podQuantity
|
||||
} else {
|
||||
sum.Add(*podQuantity)
|
||||
glog.Errorf("quota controller could not find ResourceQuota associated with namespace: %s, could take up to %v before a quota replenishes", namespace, rq.resyncPeriod())
|
||||
}
|
||||
}
|
||||
}
|
||||
// if list is empty
|
||||
if sum == nil {
|
||||
q := resource.MustParse("0")
|
||||
sum = &q
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// PodRequests returns sum of each resource request across all containers in pod
|
||||
func PodRequests(pod *api.Pod, resourceName api.ResourceName) (*resource.Quantity, error) {
|
||||
if !PodHasRequests(pod, resourceName) {
|
||||
return nil, fmt.Errorf("Each container in pod %s/%s does not have an explicit request for resource %s.", pod.Namespace, pod.Name, resourceName)
|
||||
}
|
||||
var sum *resource.Quantity
|
||||
for j := range pod.Spec.Containers {
|
||||
value, _ := pod.Spec.Containers[j].Resources.Requests[resourceName]
|
||||
if sum == nil {
|
||||
sum = value.Copy()
|
||||
} else {
|
||||
err := sum.Add(value)
|
||||
if err != nil {
|
||||
return sum, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// if list is empty
|
||||
if sum == nil {
|
||||
q := resource.MustParse("0")
|
||||
sum = &q
|
||||
}
|
||||
return sum, nil
|
||||
}
|
||||
|
||||
// PodHasRequests verifies that each container in the pod has an explicit request that is non-zero for a named resource
|
||||
func PodHasRequests(pod *api.Pod, resourceName api.ResourceName) bool {
|
||||
for j := range pod.Spec.Containers {
|
||||
value, valueSet := pod.Spec.Containers[j].Resources.Requests[resourceName]
|
||||
if !valueSet || value.Value() == int64(0) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// When a pod is deleted, enqueue the quota that manages the pod and update its expectations.
|
||||
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
|
||||
func (rq *ResourceQuotaController) deletePod(obj interface{}) {
|
||||
pod, ok := obj.(*api.Pod)
|
||||
// When a delete is dropped, the relist will notice a pod in the store not
|
||||
// in the list, leading to the insertion of a tombstone object which contains
|
||||
// the deleted key/value. Note that this value might be stale. If the pod
|
||||
// changed labels the new rc will not be woken up till the periodic resync.
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a quota records the deletion", obj, rq.resyncPeriod())
|
||||
if len(resourceQuotas) == 0 {
|
||||
return
|
||||
}
|
||||
pod, ok = tombstone.Obj.(*api.Pod)
|
||||
if !ok {
|
||||
glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before quota records the deletion", obj, rq.resyncPeriod())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
quotas, err := rq.rqIndexer.Index("namespace", pod)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't find resource quota associated with pod %+v, could take up to %v before a quota records the deletion", obj, rq.resyncPeriod())
|
||||
}
|
||||
if len(quotas) == 0 {
|
||||
glog.V(4).Infof("No resource quota associated with namespace %q", pod.Namespace)
|
||||
return
|
||||
}
|
||||
for i := range quotas {
|
||||
quota := quotas[i].(*api.ResourceQuota)
|
||||
rq.enqueueResourceQuota(quota)
|
||||
for i := range resourceQuotas {
|
||||
resourceQuota := resourceQuotas[i].(*api.ResourceQuota)
|
||||
rq.enqueueResourceQuota(resourceQuota)
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors All rights reserved.
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -17,15 +17,16 @@ limitations under the License.
|
||||
package resourcequota
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/quota/install"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
@ -47,85 +48,11 @@ func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequ
|
||||
return res
|
||||
}
|
||||
|
||||
func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
|
||||
Spec: api.PodSpec{},
|
||||
}
|
||||
pod.Spec.Containers = make([]api.Container, 0, numContainers)
|
||||
for i := 0; i < numContainers; i++ {
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
|
||||
Image: "foo:V" + strconv.Itoa(i),
|
||||
Resources: resources,
|
||||
})
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func TestFilterQuotaPods(t *testing.T) {
|
||||
pods := []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
|
||||
Status: api.PodStatus{Phase: api.PodRunning},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
|
||||
Status: api.PodStatus{Phase: api.PodPending},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
|
||||
Status: api.PodStatus{Phase: api.PodSucceeded},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
|
||||
Status: api.PodStatus{Phase: api.PodUnknown},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
|
||||
Spec: api.PodSpec{
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
|
||||
Spec: api.PodSpec{
|
||||
RestartPolicy: api.RestartPolicyOnFailure,
|
||||
},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
|
||||
Spec: api.PodSpec{
|
||||
RestartPolicy: api.RestartPolicyNever,
|
||||
},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
},
|
||||
}
|
||||
expectedResults := sets.NewString("pod-running",
|
||||
"pod-pending", "pod-unknown", "pod-failed-with-restart-always",
|
||||
"pod-failed-with-restart-on-failure")
|
||||
|
||||
actualResults := sets.String{}
|
||||
result := FilterQuotaPods(pods)
|
||||
for i := range result {
|
||||
actualResults.Insert(result[i].Name)
|
||||
}
|
||||
|
||||
if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
|
||||
t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncResourceQuota(t *testing.T) {
|
||||
podList := api.PodList{
|
||||
Items: []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-running", Namespace: "testing"},
|
||||
Status: api.PodStatus{Phase: api.PodRunning},
|
||||
Spec: api.PodSpec{
|
||||
Volumes: []api.Volume{{Name: "vol"}},
|
||||
@ -133,7 +60,7 @@ func TestSyncResourceQuota(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-running-2"},
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
|
||||
Status: api.PodStatus{Phase: api.PodRunning},
|
||||
Spec: api.PodSpec{
|
||||
Volumes: []api.Volume{{Name: "vol"}},
|
||||
@ -141,7 +68,7 @@ func TestSyncResourceQuota(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
|
||||
Status: api.PodStatus{Phase: api.PodFailed},
|
||||
Spec: api.PodSpec{
|
||||
Volumes: []api.Volume{{Name: "vol"}},
|
||||
@ -150,7 +77,8 @@ func TestSyncResourceQuota(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
quota := api.ResourceQuota{
|
||||
resourceQuota := api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "testing"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
@ -174,15 +102,43 @@ func TestSyncResourceQuota(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
kubeClient := fake.NewSimpleClientset(&podList, &quota)
|
||||
|
||||
ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
|
||||
err := ResourceQuotaController.syncResourceQuota(quota)
|
||||
kubeClient := fake.NewSimpleClientset(&podList, &resourceQuota)
|
||||
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
|
||||
KubeClient: kubeClient,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
Registry: install.NewRegistry(kubeClient),
|
||||
GroupKindsToReplenish: []unversioned.GroupKind{
|
||||
api.Kind("Pod"),
|
||||
api.Kind("Service"),
|
||||
api.Kind("ReplicationController"),
|
||||
api.Kind("PersistentVolumeClaim"),
|
||||
},
|
||||
ControllerFactory: NewReplenishmentControllerFactory(kubeClient),
|
||||
}
|
||||
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
|
||||
err := quotaController.syncResourceQuota(resourceQuota)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
|
||||
strings.Join([]string{"list", "services", ""}, "-"),
|
||||
strings.Join([]string{"list", "pods", ""}, "-"),
|
||||
strings.Join([]string{"list", "resourcequotas", ""}, "-"),
|
||||
strings.Join([]string{"list", "secrets", ""}, "-"),
|
||||
strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
|
||||
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
|
||||
usage := kubeClient.Actions()[1].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
lastActionIndex := len(kubeClient.Actions()) - 1
|
||||
usage := kubeClient.Actions()[lastActionIndex].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
|
||||
// ensure hard and used limits are what we expected
|
||||
for k, v := range expectedUsage.Status.Hard {
|
||||
@ -204,7 +160,7 @@ func TestSyncResourceQuota(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncResourceQuotaSpecChange(t *testing.T) {
|
||||
quota := api.ResourceQuota{
|
||||
resourceQuota := api.ResourceQuota{
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("4"),
|
||||
@ -231,15 +187,44 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
kubeClient := fake.NewSimpleClientset(&quota)
|
||||
|
||||
ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
|
||||
err := ResourceQuotaController.syncResourceQuota(quota)
|
||||
kubeClient := fake.NewSimpleClientset(&resourceQuota)
|
||||
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
|
||||
KubeClient: kubeClient,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
Registry: install.NewRegistry(kubeClient),
|
||||
GroupKindsToReplenish: []unversioned.GroupKind{
|
||||
api.Kind("Pod"),
|
||||
api.Kind("Service"),
|
||||
api.Kind("ReplicationController"),
|
||||
api.Kind("PersistentVolumeClaim"),
|
||||
},
|
||||
ControllerFactory: NewReplenishmentControllerFactory(kubeClient),
|
||||
}
|
||||
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
|
||||
err := quotaController.syncResourceQuota(resourceQuota)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
usage := kubeClient.Actions()[1].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
|
||||
strings.Join([]string{"list", "services", ""}, "-"),
|
||||
strings.Join([]string{"list", "pods", ""}, "-"),
|
||||
strings.Join([]string{"list", "resourcequotas", ""}, "-"),
|
||||
strings.Join([]string{"list", "secrets", ""}, "-"),
|
||||
strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
|
||||
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
|
||||
lastActionIndex := len(kubeClient.Actions()) - 1
|
||||
usage := kubeClient.Actions()[lastActionIndex].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
|
||||
// ensure hard and used limits are what we expected
|
||||
for k, v := range expectedUsage.Status.Hard {
|
||||
@ -262,7 +247,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncResourceQuotaNoChange(t *testing.T) {
|
||||
quota := api.ResourceQuota{
|
||||
resourceQuota := api.ResourceQuota{
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("4"),
|
||||
@ -278,165 +263,37 @@ func TestSyncResourceQuotaNoChange(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
kubeClient := fake.NewSimpleClientset(&api.PodList{}, &quota)
|
||||
|
||||
ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
|
||||
err := ResourceQuotaController.syncResourceQuota(quota)
|
||||
kubeClient := fake.NewSimpleClientset(&api.PodList{}, &resourceQuota)
|
||||
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
|
||||
KubeClient: kubeClient,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
Registry: install.NewRegistry(kubeClient),
|
||||
GroupKindsToReplenish: []unversioned.GroupKind{
|
||||
api.Kind("Pod"),
|
||||
api.Kind("Service"),
|
||||
api.Kind("ReplicationController"),
|
||||
api.Kind("PersistentVolumeClaim"),
|
||||
},
|
||||
ControllerFactory: NewReplenishmentControllerFactory(kubeClient),
|
||||
}
|
||||
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
|
||||
err := quotaController.syncResourceQuota(resourceQuota)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
actions := kubeClient.Actions()
|
||||
if len(actions) != 1 && !actions[0].Matches("list", "pods") {
|
||||
t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodHasRequests(t *testing.T) {
|
||||
type testCase struct {
|
||||
pod *api.Pod
|
||||
resourceName api.ResourceName
|
||||
expectedResult bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "2Mi"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: false,
|
||||
},
|
||||
}
|
||||
for _, item := range testCases {
|
||||
if actual := PodHasRequests(item.pod, item.resourceName); item.expectedResult != actual {
|
||||
t.Errorf("Pod %s for resource %s expected %v actual %v", item.pod.Name, item.resourceName, item.expectedResult, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodRequests(t *testing.T) {
|
||||
type testCase struct {
|
||||
pod *api.Pod
|
||||
resourceName api.ResourceName
|
||||
expectedResult string
|
||||
expectedError bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "200m",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: "1000Mi",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: "",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: "",
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
for _, item := range testCases {
|
||||
actual, err := PodRequests(item.pod, item.resourceName)
|
||||
if item.expectedError != (err != nil) {
|
||||
t.Errorf("Unexpected error result for pod %s for resource %s expected error %v got %v", item.pod.Name, item.resourceName, item.expectedError, err)
|
||||
}
|
||||
if item.expectedResult != "" && (item.expectedResult != actual.String()) {
|
||||
t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pod.Name, item.resourceName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodsRequests(t *testing.T) {
|
||||
type testCase struct {
|
||||
pods []*api.Pod
|
||||
resourceName api.ResourceName
|
||||
expectedResult string
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
pods: []*api.Pod{
|
||||
validPod("request-cpu-1", 1, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
|
||||
validPod("request-cpu-2", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "1100m",
|
||||
},
|
||||
{
|
||||
pods: []*api.Pod{
|
||||
validPod("no-request-cpu-1", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
validPod("no-request-cpu-2", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "",
|
||||
},
|
||||
{
|
||||
pods: []*api.Pod{
|
||||
validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
|
||||
validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
expectedResult: "",
|
||||
},
|
||||
{
|
||||
pods: []*api.Pod{
|
||||
validPod("request-memory-1", 1, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))),
|
||||
validPod("request-memory-2", 1, getResourceRequirements(getResourceList("", "1Gi"), getResourceList("", ""))),
|
||||
},
|
||||
resourceName: api.ResourceMemory,
|
||||
expectedResult: "1524Mi",
|
||||
},
|
||||
}
|
||||
for _, item := range testCases {
|
||||
actual := PodsRequests(item.pods, item.resourceName)
|
||||
if item.expectedResult != "" && (item.expectedResult != actual.String()) {
|
||||
t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pods[0].Name, item.resourceName)
|
||||
}
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
|
||||
strings.Join([]string{"list", "services", ""}, "-"),
|
||||
strings.Join([]string{"list", "pods", ""}, "-"),
|
||||
strings.Join([]string{"list", "resourcequotas", ""}, "-"),
|
||||
strings.Join([]string{"list", "secrets", ""}, "-"),
|
||||
strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
}
|
||||
|
@ -262,37 +262,42 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) {
|
||||
fmt.Fprint(w, "No resource quota.\n")
|
||||
return
|
||||
}
|
||||
resources := []api.ResourceName{}
|
||||
hard := map[api.ResourceName]resource.Quantity{}
|
||||
used := map[api.ResourceName]resource.Quantity{}
|
||||
sort.Sort(SortableResourceQuotas(quotas.Items))
|
||||
|
||||
fmt.Fprint(w, "Resource Quotas")
|
||||
for _, q := range quotas.Items {
|
||||
fmt.Fprintf(w, "\n Name:\t%s\n", q.Name)
|
||||
if len(q.Spec.Scopes) > 0 {
|
||||
scopes := []string{}
|
||||
for _, scope := range q.Spec.Scopes {
|
||||
scopes = append(scopes, string(scope))
|
||||
}
|
||||
sort.Strings(scopes)
|
||||
fmt.Fprintf(w, " Scopes:\t%s\n", strings.Join(scopes, ", "))
|
||||
for _, scope := range scopes {
|
||||
helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope))
|
||||
if len(helpText) > 0 {
|
||||
fmt.Fprintf(w, " * %s\n", helpText)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, " Resource\tUsed\tHard\n")
|
||||
fmt.Fprint(w, " --------\t---\t---\n")
|
||||
|
||||
resources := []api.ResourceName{}
|
||||
for resource := range q.Status.Hard {
|
||||
resources = append(resources, resource)
|
||||
}
|
||||
sort.Sort(SortableResourceNames(resources))
|
||||
|
||||
for _, resource := range resources {
|
||||
hardQuantity := q.Status.Hard[resource]
|
||||
usedQuantity := q.Status.Used[resource]
|
||||
|
||||
// if for some reason there are multiple quota documents, we take least permissive
|
||||
prevQuantity, ok := hard[resource]
|
||||
if ok {
|
||||
if hardQuantity.Value() < prevQuantity.Value() {
|
||||
hard[resource] = hardQuantity
|
||||
}
|
||||
} else {
|
||||
hard[resource] = hardQuantity
|
||||
}
|
||||
used[resource] = usedQuantity
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(SortableResourceNames(resources))
|
||||
fmt.Fprint(w, "Resource Quotas\n Resource\tUsed\tHard\n")
|
||||
fmt.Fprint(w, " ---\t---\t---\n")
|
||||
for _, resource := range resources {
|
||||
hardQuantity := hard[resource]
|
||||
usedQuantity := used[resource]
|
||||
fmt.Fprintf(w, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LimitRangeDescriber generates information about a limit range
|
||||
type LimitRangeDescriber struct {
|
||||
@ -397,10 +402,38 @@ func (d *ResourceQuotaDescriber) Describe(namespace, name string) (string, error
|
||||
return describeQuota(resourceQuota)
|
||||
}
|
||||
|
||||
func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string {
|
||||
switch scope {
|
||||
case api.ResourceQuotaScopeTerminating:
|
||||
return "Matches all pods that have an active deadline."
|
||||
case api.ResourceQuotaScopeNotTerminating:
|
||||
return "Matches all pods that do not have an active deadline."
|
||||
case api.ResourceQuotaScopeBestEffort:
|
||||
return "Matches all pods that have best effort quality of service."
|
||||
case api.ResourceQuotaScopeNotBestEffort:
|
||||
return "Matches all pods that do not have best effort quality of service."
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
func describeQuota(resourceQuota *api.ResourceQuota) (string, error) {
|
||||
return tabbedString(func(out io.Writer) error {
|
||||
fmt.Fprintf(out, "Name:\t%s\n", resourceQuota.Name)
|
||||
fmt.Fprintf(out, "Namespace:\t%s\n", resourceQuota.Namespace)
|
||||
if len(resourceQuota.Spec.Scopes) > 0 {
|
||||
scopes := []string{}
|
||||
for _, scope := range resourceQuota.Spec.Scopes {
|
||||
scopes = append(scopes, string(scope))
|
||||
}
|
||||
sort.Strings(scopes)
|
||||
fmt.Fprintf(out, "Scopes:\t%s\n", strings.Join(scopes, ", "))
|
||||
for _, scope := range scopes {
|
||||
helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope))
|
||||
if len(helpText) > 0 {
|
||||
fmt.Fprintf(out, " * %s\n", helpText)
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(out, "Resource\tUsed\tHard\n")
|
||||
fmt.Fprintf(out, "--------\t----\t----\n")
|
||||
|
||||
|
@ -33,3 +33,17 @@ func (list SortableResourceNames) Swap(i, j int) {
func (list SortableResourceNames) Less(i, j int) bool {
	return list[i] < list[j]
}

type SortableResourceQuotas []api.ResourceQuota

func (list SortableResourceQuotas) Len() int {
	return len(list)
}

func (list SortableResourceQuotas) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableResourceQuotas) Less(i, j int) bool {
	return list[i].Name < list[j].Name
}
pkg/quota/evaluator/core/doc.go (new file, 18 lines)
@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// core contains modules that interface with the core api group
package core
pkg/quota/evaluator/core/persistent_volume_claims.go (new file, 45 lines)
@ -0,0 +1,45 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/quota"
	"k8s.io/kubernetes/pkg/quota/generic"
	"k8s.io/kubernetes/pkg/runtime"
)

// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
func NewPersistentVolumeClaimEvaluator(kubeClient clientset.Interface) quota.Evaluator {
	allResources := []api.ResourceName{api.ResourcePersistentVolumeClaims}
	return &generic.GenericEvaluator{
		Name:              "Evaluator.PersistentVolumeClaim",
		InternalGroupKind: api.Kind("PersistentVolumeClaim"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
		},
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     generic.MatchesNoScopeFunc,
		ConstraintsFunc:      generic.ObjectCountConstraintsFunc(api.ResourcePersistentVolumeClaims),
		UsageFunc:            generic.ObjectCountUsageFunc(api.ResourcePersistentVolumeClaims),
		ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
			return kubeClient.Core().PersistentVolumeClaims(namespace).List(options)
		},
	}
}
|
183
pkg/quota/evaluator/core/pods.go
Normal file
@ -0,0 +1,183 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos/util"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
// NewPodEvaluator returns an evaluator that can evaluate pods
|
||||
func NewPodEvaluator(kubeClient clientset.Interface) quota.Evaluator {
|
||||
computeResources := []api.ResourceName{
|
||||
api.ResourceCPU,
|
||||
api.ResourceMemory,
|
||||
api.ResourceRequestsCPU,
|
||||
api.ResourceRequestsMemory,
|
||||
api.ResourceLimitsCPU,
|
||||
api.ResourceLimitsMemory,
|
||||
}
|
||||
allResources := append(computeResources, api.ResourcePods)
|
||||
return &generic.GenericEvaluator{
|
||||
Name: "Evaluator.Pod",
|
||||
InternalGroupKind: api.Kind("Pod"),
|
||||
InternalOperationResources: map[admission.Operation][]api.ResourceName{
|
||||
admission.Create: allResources,
|
||||
admission.Update: computeResources,
|
||||
},
|
||||
GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
|
||||
return kubeClient.Core().Pods(namespace).Get(name)
|
||||
},
|
||||
ConstraintsFunc: PodConstraintsFunc,
|
||||
MatchedResourceNames: allResources,
|
||||
MatchesScopeFunc: PodMatchesScopeFunc,
|
||||
UsageFunc: PodUsageFunc,
|
||||
ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().Pods(namespace).List(options)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// PodConstraintsFunc verifies that all required resources are present on the pod
|
||||
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
|
||||
pod, ok := object.(*api.Pod)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unexpected input object %v", object)
|
||||
}
|
||||
|
||||
// TODO: fix this when we have pod level cgroups
|
||||
// since we do not yet support pod level requests/limits, we need to ensure each
// container makes an explicit request or limit for a quota tracked resource
|
||||
requiredSet := quota.ToSet(required)
|
||||
missingSet := sets.NewString()
|
||||
for i := range pod.Spec.Containers {
|
||||
requests := pod.Spec.Containers[i].Resources.Requests
|
||||
limits := pod.Spec.Containers[i].Resources.Limits
|
||||
containerUsage := podUsageHelper(requests, limits)
|
||||
containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
|
||||
if !containerSet.Equal(requiredSet) {
|
||||
difference := requiredSet.Difference(containerSet)
|
||||
missingSet.Insert(difference.List()...)
|
||||
}
|
||||
}
|
||||
if len(missingSet) == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
|
||||
}
|
||||
|
||||
// podUsageHelper can summarize the pod quota usage based on requests and limits
|
||||
func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList {
|
||||
result := api.ResourceList{}
|
||||
result[api.ResourcePods] = resource.MustParse("1")
|
||||
if request, found := requests[api.ResourceCPU]; found {
|
||||
result[api.ResourceCPU] = request
|
||||
result[api.ResourceRequestsCPU] = request
|
||||
}
|
||||
if limit, found := limits[api.ResourceCPU]; found {
|
||||
result[api.ResourceLimitsCPU] = limit
|
||||
}
|
||||
if request, found := requests[api.ResourceMemory]; found {
|
||||
result[api.ResourceMemory] = request
|
||||
result[api.ResourceRequestsMemory] = request
|
||||
}
|
||||
if limit, found := limits[api.ResourceMemory]; found {
|
||||
result[api.ResourceLimitsMemory] = limit
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// PodUsageFunc knows how to measure usage associated with pods
|
||||
func PodUsageFunc(object runtime.Object) api.ResourceList {
|
||||
pod, ok := object.(*api.Pod)
|
||||
if !ok {
|
||||
return api.ResourceList{}
|
||||
}
|
||||
|
||||
// by convention, we do not quota pods that have reached an end-of-life state
|
||||
if !QuotaPod(pod) {
|
||||
return api.ResourceList{}
|
||||
}
|
||||
|
||||
// TODO: fix this when we have pod level cgroups
|
||||
// when we have pod level cgroups, we can just read pod level requests/limits
|
||||
requests := api.ResourceList{}
|
||||
limits := api.ResourceList{}
|
||||
for i := range pod.Spec.Containers {
|
||||
requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests)
|
||||
limits = quota.Add(limits, pod.Spec.Containers[i].Resources.Limits)
|
||||
}
|
||||
|
||||
return podUsageHelper(requests, limits)
|
||||
}
|
||||
|
||||
// PodMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
|
||||
func PodMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
|
||||
pod, ok := object.(*api.Pod)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
switch scope {
|
||||
case api.ResourceQuotaScopeTerminating:
|
||||
return isTerminating(pod)
|
||||
case api.ResourceQuotaScopeNotTerminating:
|
||||
return !isTerminating(pod)
|
||||
case api.ResourceQuotaScopeBestEffort:
|
||||
return isBestEffort(pod)
|
||||
case api.ResourceQuotaScopeNotBestEffort:
|
||||
return !isBestEffort(pod)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isBestEffort(pod *api.Pod) bool {
|
||||
// TODO: when we have request/limits on a pod scope, we need to revisit this
|
||||
for _, container := range pod.Spec.Containers {
|
||||
qosPerResource := util.GetQoS(&container)
|
||||
for _, qos := range qosPerResource {
|
||||
if util.BestEffort == qos {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isTerminating(pod *api.Pod) bool {
|
||||
if pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// QuotaPod returns true if the pod is eligible to track against a quota
|
||||
// if it's not in a terminal state according to its phase.
|
||||
func QuotaPod(pod *api.Pod) bool {
|
||||
// see GetPhase in kubelet.go for details on how it covers all restart policy conditions
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet.go#L3001
|
||||
return !(api.PodFailed == pod.Status.Phase || api.PodSucceeded == pod.Status.Phase)
|
||||
}
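For orientation, a small sketch (not part of the commit) of what PodUsageFunc reports for a concrete pod; the container and the literal quantities are invented for illustration:

package core

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// examplePodUsage prints the quota charges for a pod whose single container
// requests 100m CPU and limits memory to 512Mi.
func examplePodUsage() {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name: "app",
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
					Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("512Mi")},
				},
			}},
		},
	}
	// expected: pods=1, cpu=100m, requests.cpu=100m, limits.memory=512Mi;
	// memory/requests.memory are absent because no memory request was made
	fmt.Println(PodUsageFunc(pod))
}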
|
44
pkg/quota/evaluator/core/registry.go
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
)
|
||||
|
||||
// NewRegistry returns a registry that knows how to deal with core kubernetes resources
|
||||
func NewRegistry(kubeClient clientset.Interface) quota.Registry {
|
||||
pod := NewPodEvaluator(kubeClient)
|
||||
service := NewServiceEvaluator(kubeClient)
|
||||
replicationController := NewReplicationControllerEvaluator(kubeClient)
|
||||
resourceQuota := NewResourceQuotaEvaluator(kubeClient)
|
||||
secret := NewSecretEvaluator(kubeClient)
|
||||
persistentVolumeClaim := NewPersistentVolumeClaimEvaluator(kubeClient)
|
||||
return &generic.GenericRegistry{
|
||||
InternalEvaluators: map[unversioned.GroupKind]quota.Evaluator{
|
||||
pod.GroupKind(): pod,
|
||||
service.GroupKind(): service,
|
||||
replicationController.GroupKind(): replicationController,
|
||||
secret.GroupKind(): secret,
|
||||
resourceQuota.GroupKind(): resourceQuota,
|
||||
persistentVolumeClaim.GroupKind(): persistentVolumeClaim,
|
||||
},
|
||||
}
|
||||
}
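A brief sketch of how a caller might use this registry to ask whether a quota document is relevant to a pod; the helper name is an assumption, and kubeClient is whatever clientset.Interface the caller already holds:

package core

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// quotaAppliesToPod dispatches to the evaluator registered for the Pod
// group kind and asks whether the supplied quota tracks the supplied pod.
func quotaAppliesToPod(kubeClient clientset.Interface, rq *api.ResourceQuota, pod *api.Pod) bool {
	evaluator, found := NewRegistry(kubeClient).Evaluators()[api.Kind("Pod")]
	if !found {
		return false
	}
	return evaluator.Matches(rq, pod)
}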
|
45
pkg/quota/evaluator/core/replication_controllers.go
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// NewReplicationControllerEvaluator returns an evaluator that can evaluate replication controllers
|
||||
func NewReplicationControllerEvaluator(kubeClient clientset.Interface) quota.Evaluator {
|
||||
allResources := []api.ResourceName{api.ResourceReplicationControllers}
|
||||
return &generic.GenericEvaluator{
|
||||
Name: "Evaluator.ReplicationController",
|
||||
InternalGroupKind: api.Kind("ReplicationController"),
|
||||
InternalOperationResources: map[admission.Operation][]api.ResourceName{
|
||||
admission.Create: allResources,
|
||||
},
|
||||
MatchedResourceNames: allResources,
|
||||
MatchesScopeFunc: generic.MatchesNoScopeFunc,
|
||||
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceReplicationControllers),
|
||||
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceReplicationControllers),
|
||||
ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().ReplicationControllers(namespace).List(options)
|
||||
},
|
||||
}
|
||||
}
|
45
pkg/quota/evaluator/core/resource_quotas.go
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
|
||||
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
|
||||
allResources := []api.ResourceName{api.ResourceQuotas}
|
||||
return &generic.GenericEvaluator{
|
||||
Name: "Evaluator.ResourceQuota",
|
||||
InternalGroupKind: api.Kind("ResourceQuota"),
|
||||
InternalOperationResources: map[admission.Operation][]api.ResourceName{
|
||||
admission.Create: allResources,
|
||||
},
|
||||
MatchedResourceNames: allResources,
|
||||
MatchesScopeFunc: generic.MatchesNoScopeFunc,
|
||||
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
|
||||
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceQuotas),
|
||||
ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().ResourceQuotas(namespace).List(options)
|
||||
},
|
||||
}
|
||||
}
|
45
pkg/quota/evaluator/core/secrets.go
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// NewSecretEvaluator returns an evaluator that can evaluate secrets
|
||||
func NewSecretEvaluator(kubeClient clientset.Interface) quota.Evaluator {
|
||||
allResources := []api.ResourceName{api.ResourceSecrets}
|
||||
return &generic.GenericEvaluator{
|
||||
Name: "Evaluator.Secret",
|
||||
InternalGroupKind: api.Kind("Secret"),
|
||||
InternalOperationResources: map[admission.Operation][]api.ResourceName{
|
||||
admission.Create: allResources,
|
||||
},
|
||||
MatchedResourceNames: allResources,
|
||||
MatchesScopeFunc: generic.MatchesNoScopeFunc,
|
||||
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceSecrets),
|
||||
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceSecrets),
|
||||
ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().Secrets(namespace).List(options)
|
||||
},
|
||||
}
|
||||
}
|
45
pkg/quota/evaluator/core/services.go
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/generic"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// NewServiceEvaluator returns an evaluator that can evaluate service quotas
|
||||
func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
|
||||
allResources := []api.ResourceName{api.ResourceServices}
|
||||
return &generic.GenericEvaluator{
|
||||
Name: "Evaluator.Service",
|
||||
InternalGroupKind: api.Kind("Service"),
|
||||
InternalOperationResources: map[admission.Operation][]api.ResourceName{
|
||||
admission.Create: allResources,
|
||||
},
|
||||
MatchedResourceNames: allResources,
|
||||
MatchesScopeFunc: generic.MatchesNoScopeFunc,
|
||||
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceServices),
|
||||
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceServices),
|
||||
ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().Services(namespace).List(options)
|
||||
},
|
||||
}
|
||||
}
|
199
pkg/quota/generic/evaluator.go
Normal file
@ -0,0 +1,199 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// ConstraintsFunc takes a list of required resources that must match on the input item
|
||||
type ConstraintsFunc func(required []api.ResourceName, item runtime.Object) error
|
||||
|
||||
// GetFuncByNamespace knows how to get a resource with specified namespace and name
|
||||
type GetFuncByNamespace func(namespace, name string) (runtime.Object, error)
|
||||
|
||||
// ListFuncByNamespace knows how to list resources in a namespace
|
||||
type ListFuncByNamespace func(namespace string, options api.ListOptions) (runtime.Object, error)
|
||||
|
||||
// MatchesScopeFunc knows how to evaluate if an object matches a scope
|
||||
type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) bool
|
||||
|
||||
// UsageFunc knows how to measure usage associated with an object
|
||||
type UsageFunc func(object runtime.Object) api.ResourceList
|
||||
|
||||
// MatchesNoScopeFunc returns false on all match checks
|
||||
func MatchesNoScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ObjectCountConstraintsFunc returns a ConstraintsFunc that errors unless the specified
// resource name is in the required set of resource names
|
||||
func ObjectCountConstraintsFunc(resourceName api.ResourceName) ConstraintsFunc {
|
||||
return func(required []api.ResourceName, item runtime.Object) error {
|
||||
if !quota.Contains(required, resourceName) {
|
||||
return fmt.Errorf("missing %s", resourceName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ObjectCountUsageFunc is useful if you are only counting instances of your object
|
||||
// It always returns 1 as the usage for the named resource
|
||||
func ObjectCountUsageFunc(resourceName api.ResourceName) UsageFunc {
|
||||
return func(object runtime.Object) api.ResourceList {
|
||||
return api.ResourceList{
|
||||
resourceName: resource.MustParse("1"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GenericEvaluator provides an implementation for quota.Evaluator
|
||||
type GenericEvaluator struct {
|
||||
// Name used for logging
|
||||
Name string
|
||||
// The GroupKind that this evaluator tracks
|
||||
InternalGroupKind unversioned.GroupKind
|
||||
// The set of resources that are pertinent to the mapped operation
|
||||
InternalOperationResources map[admission.Operation][]api.ResourceName
|
||||
// The set of resource names this evaluator matches
|
||||
MatchedResourceNames []api.ResourceName
|
||||
// A function that knows how to evaluate a matches scope request
|
||||
MatchesScopeFunc MatchesScopeFunc
|
||||
// A function that knows how to return usage for an object
|
||||
UsageFunc UsageFunc
|
||||
// A function that knows how to list resources by namespace
|
||||
ListFuncByNamespace ListFuncByNamespace
|
||||
// A function that knows how to get resource in a namespace
|
||||
// This function must be specified if the evaluator needs to handle UPDATE
|
||||
GetFuncByNamespace GetFuncByNamespace
|
||||
// A function that checks required constraints are satisfied
|
||||
ConstraintsFunc ConstraintsFunc
|
||||
}
|
||||
|
||||
// Ensure that GenericEvaluator implements quota.Evaluator
|
||||
var _ quota.Evaluator = &GenericEvaluator{}
|
||||
|
||||
// Constraints checks required constraints are satisfied on the input object
|
||||
func (g *GenericEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
|
||||
return g.ConstraintsFunc(required, item)
|
||||
}
|
||||
|
||||
// Get returns the object by namespace and name
|
||||
func (g *GenericEvaluator) Get(namespace, name string) (runtime.Object, error) {
|
||||
return g.GetFuncByNamespace(namespace, name)
|
||||
}
|
||||
|
||||
// OperationResources returns the set of resources that could be updated for the
|
||||
// specified operation for this kind. If empty, admission control will ignore
|
||||
// quota processing for the operation.
|
||||
func (g *GenericEvaluator) OperationResources(operation admission.Operation) []api.ResourceName {
|
||||
return g.InternalOperationResources[operation]
|
||||
}
|
||||
|
||||
// GroupKind that this evaluator tracks
|
||||
func (g *GenericEvaluator) GroupKind() unversioned.GroupKind {
|
||||
return g.InternalGroupKind
|
||||
}
|
||||
|
||||
// MatchesResources is the list of resources that this evaluator matches
|
||||
func (g *GenericEvaluator) MatchesResources() []api.ResourceName {
|
||||
return g.MatchedResourceNames
|
||||
}
|
||||
|
||||
// Matches returns true if the evaluator matches the specified quota with the provided input item
|
||||
func (g *GenericEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool {
|
||||
if resourceQuota == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// verify the quota matches on at least one tracked resource; by default it's false
|
||||
matchResource := false
|
||||
for resourceName := range resourceQuota.Status.Hard {
|
||||
if g.MatchesResource(resourceName) {
|
||||
matchResource = true
|
||||
}
|
||||
}
|
||||
// by default, a quota with no scopes matches all objects
|
||||
matchScope := true
|
||||
for _, scope := range resourceQuota.Spec.Scopes {
|
||||
matchScope = matchScope && g.MatchesScope(scope, item)
|
||||
}
|
||||
return matchResource && matchScope
|
||||
}
|
||||
|
||||
// MatchesResource returns true if this evaluator can match on the specified resource
|
||||
func (g *GenericEvaluator) MatchesResource(resourceName api.ResourceName) bool {
|
||||
for _, matchedResourceName := range g.MatchedResourceNames {
|
||||
if resourceName == matchedResourceName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MatchesScope returns true if the input object matches the specified scope
|
||||
func (g *GenericEvaluator) MatchesScope(scope api.ResourceQuotaScope, object runtime.Object) bool {
|
||||
return g.MatchesScopeFunc(scope, object)
|
||||
}
|
||||
|
||||
// Usage returns the resource usage for the specified object
|
||||
func (g *GenericEvaluator) Usage(object runtime.Object) api.ResourceList {
|
||||
return g.UsageFunc(object)
|
||||
}
|
||||
|
||||
// UsageStats calculates latest observed usage stats for all objects
|
||||
func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
|
||||
// default each tracked resource to zero
|
||||
result := quota.UsageStats{Used: api.ResourceList{}}
|
||||
for _, resourceName := range g.MatchedResourceNames {
|
||||
result.Used[resourceName] = resource.MustParse("0")
|
||||
}
|
||||
list, err := g.ListFuncByNamespace(options.Namespace, api.ListOptions{})
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind, err)
|
||||
}
|
||||
_, err = meta.Accessor(list)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("%s: Unable to understand list result %#v", g.Name, list)
|
||||
}
|
||||
items, err := meta.ExtractList(list)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("%s: Unable to understand list result %#v (%v)", g.Name, list, err)
|
||||
}
|
||||
for _, item := range items {
|
||||
// need to verify that the item matches the set of scopes
|
||||
matchesScopes := true
|
||||
for _, scope := range options.Scopes {
|
||||
if !g.MatchesScope(scope, item) {
|
||||
matchesScopes = false
|
||||
}
|
||||
}
|
||||
// only count usage if there was a match
|
||||
if matchesScopes {
|
||||
result.Used = quota.Add(result.Used, g.Usage(item))
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
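To make the generic machinery concrete, here is a toy sketch (not part of the commit) that wires the object-count helpers into a GenericEvaluator fed by a canned list instead of a live client; the function name and the three-pod list are invented for illustration:

package generic

import (
	"fmt"

	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/quota"
	"k8s.io/kubernetes/pkg/runtime"
)

// exampleObjectCount counts the items returned by a canned list function and
// prints the aggregate usage observed for a namespace.
func exampleObjectCount() {
	canned := &api.PodList{Items: []api.Pod{{}, {}, {}}} // three fake pods
	evaluator := &GenericEvaluator{
		Name:              "Evaluator.Example",
		InternalGroupKind: api.Kind("Pod"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: {api.ResourcePods},
		},
		MatchedResourceNames: []api.ResourceName{api.ResourcePods},
		MatchesScopeFunc:     MatchesNoScopeFunc,
		ConstraintsFunc:      ObjectCountConstraintsFunc(api.ResourcePods),
		UsageFunc:            ObjectCountUsageFunc(api.ResourcePods),
		ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
			return canned, nil
		},
	}
	stats, err := evaluator.UsageStats(quota.UsageStatsOptions{Namespace: "demo"})
	if err != nil {
		fmt.Println(err)
		return
	}
	used := stats.Used[api.ResourcePods]
	fmt.Println(used.String()) // expected "3": each listed object counts as one
}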
|
36
pkg/quota/generic/registry.go
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generic
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
)
|
||||
|
||||
// Ensure it implements the required interface
|
||||
var _ quota.Registry = &GenericRegistry{}
|
||||
|
||||
// GenericRegistry implements Registry
|
||||
type GenericRegistry struct {
|
||||
// internal evaluators by group kind
|
||||
InternalEvaluators map[unversioned.GroupKind]quota.Evaluator
|
||||
}
|
||||
|
||||
// Evaluators returns the map of evaluators by groupKind
|
||||
func (r *GenericRegistry) Evaluators() map[unversioned.GroupKind]quota.Evaluator {
|
||||
return r.InternalEvaluators
|
||||
}
|
30
pkg/quota/install/registry.go
Normal file
@ -0,0 +1,30 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package install
|
||||
|
||||
import (
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/evaluator/core"
|
||||
)
|
||||
|
||||
// NewRegistry returns a registry that knows how to deal with kubernetes resources
|
||||
// across API groups
|
||||
func NewRegistry(kubeClient clientset.Interface) quota.Registry {
|
||||
// TODO: when quota supports resources in other api groups, we will need to merge
|
||||
return core.NewRegistry(kubeClient)
|
||||
}
|
66
pkg/quota/interfaces.go
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package quota
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// UsageStatsOptions is an options struct that describes how stats should be calculated
|
||||
type UsageStatsOptions struct {
|
||||
// Namespace where stats should be calculated
|
||||
Namespace string
|
||||
// Scopes that must match counted objects
|
||||
Scopes []api.ResourceQuotaScope
|
||||
}
|
||||
|
||||
// UsageStats is the result of measuring observed resource use in the system
|
||||
type UsageStats struct {
|
||||
// Used maps resource to quantity used
|
||||
Used api.ResourceList
|
||||
}
|
||||
|
||||
// Evaluator knows how to evaluate quota usage for a particular group kind
|
||||
type Evaluator interface {
|
||||
// Constraints ensures that each required resource is present on item
|
||||
Constraints(required []api.ResourceName, item runtime.Object) error
|
||||
// Get returns the object with specified namespace and name
|
||||
Get(namespace, name string) (runtime.Object, error)
|
||||
// GroupKind returns the groupKind that this object knows how to evaluate
|
||||
GroupKind() unversioned.GroupKind
|
||||
// MatchesResources is the list of resources that this evaluator matches
|
||||
MatchesResources() []api.ResourceName
|
||||
// Matches returns true if the specified quota matches the input item
|
||||
Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool
|
||||
// OperationResources returns the set of resources that could be updated for the
|
||||
// specified operation for this kind. If empty, admission control will ignore
|
||||
// quota processing for the operation.
|
||||
OperationResources(operation admission.Operation) []api.ResourceName
|
||||
// Usage returns the resource usage for the specified object
|
||||
Usage(object runtime.Object) api.ResourceList
|
||||
// UsageStats calculates latest observed usage stats for all objects
|
||||
UsageStats(options UsageStatsOptions) (UsageStats, error)
|
||||
}
|
||||
|
||||
// Registry holds the list of evaluators associated to a particular group kind
|
||||
type Registry interface {
|
||||
// Evaluators returns the set Evaluator objects registered to a groupKind
|
||||
Evaluators() map[unversioned.GroupKind]Evaluator
|
||||
}
|
159
pkg/quota/resources.go
Normal file
@ -0,0 +1,159 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package quota
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
// Equals returns true if the two lists are equivalent
|
||||
func Equals(a api.ResourceList, b api.ResourceList) bool {
|
||||
for key, value1 := range a {
|
||||
value2, found := b[key]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if value1.Cmp(value2) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for key, value1 := range b {
|
||||
value2, found := a[key]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if value1.Cmp(value2) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// LessThanOrEqual returns true if a <= b for each key in b
|
||||
// If false, it returns the keys in a that exceeded b
|
||||
func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.ResourceName) {
|
||||
result := true
|
||||
resourceNames := []api.ResourceName{}
|
||||
for key, value := range b {
|
||||
if other, found := a[key]; found {
|
||||
if other.Cmp(value) > 0 {
|
||||
result = false
|
||||
resourceNames = append(resourceNames, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, resourceNames
|
||||
}
|
||||
|
||||
// Add returns the result of a + b for each named resource
|
||||
func Add(a api.ResourceList, b api.ResourceList) api.ResourceList {
|
||||
result := api.ResourceList{}
|
||||
for key, value := range a {
|
||||
quantity := *value.Copy()
|
||||
if other, found := b[key]; found {
|
||||
quantity.Add(other)
|
||||
}
|
||||
result[key] = quantity
|
||||
}
|
||||
for key, value := range b {
|
||||
if _, found := result[key]; !found {
|
||||
quantity := *value.Copy()
|
||||
result[key] = quantity
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Subtract returns the result of a - b for each named resource
|
||||
func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList {
|
||||
result := api.ResourceList{}
|
||||
for key, value := range a {
|
||||
quantity := *value.Copy()
|
||||
if other, found := b[key]; found {
|
||||
quantity.Sub(other)
|
||||
}
|
||||
result[key] = quantity
|
||||
}
|
||||
for key, value := range b {
|
||||
if _, found := result[key]; !found {
|
||||
quantity := *value.Copy()
|
||||
quantity.Neg(value)
|
||||
result[key] = quantity
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Mask returns a new resource list that only has the values with the specified names
|
||||
func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList {
|
||||
nameSet := ToSet(names)
|
||||
result := api.ResourceList{}
|
||||
for key, value := range resources {
|
||||
if nameSet.Has(string(key)) {
|
||||
result[key] = *value.Copy()
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ResourceNames returns a list of all resource names in the ResourceList
|
||||
func ResourceNames(resources api.ResourceList) []api.ResourceName {
|
||||
result := []api.ResourceName{}
|
||||
for resourceName := range resources {
|
||||
result = append(result, resourceName)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Contains returns true if the specified item is in the list of items
|
||||
func Contains(items []api.ResourceName, item api.ResourceName) bool {
|
||||
return ToSet(items).Has(string(item))
|
||||
}
|
||||
|
||||
// Intersection returns the intersection of both list of resources
|
||||
func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName {
|
||||
setA := ToSet(a)
|
||||
setB := ToSet(b)
|
||||
setC := setA.Intersection(setB)
|
||||
result := []api.ResourceName{}
|
||||
for _, resourceName := range setC.List() {
|
||||
result = append(result, api.ResourceName(resourceName))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsZero returns true if each key maps to the quantity value 0
|
||||
func IsZero(a api.ResourceList) bool {
|
||||
zero := resource.MustParse("0")
|
||||
for _, v := range a {
|
||||
if v.Cmp(zero) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ToSet takes a list of resource names and converts to a string set
|
||||
func ToSet(resourceNames []api.ResourceName) sets.String {
|
||||
result := sets.NewString()
|
||||
for _, resourceName := range resourceNames {
|
||||
result.Insert(string(resourceName))
|
||||
}
|
||||
return result
|
||||
}
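A short, self-contained sketch (not part of the commit) that walks through the arithmetic helpers above; the quantities are arbitrary examples:

package quota

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// exampleArithmetic demonstrates Add, Subtract, Mask and Equals on small lists.
func exampleArithmetic() {
	used := api.ResourceList{
		api.ResourceCPU:  resource.MustParse("500m"),
		api.ResourcePods: resource.MustParse("3"),
	}
	delta := api.ResourceList{
		api.ResourceCPU:  resource.MustParse("250m"),
		api.ResourcePods: resource.MustParse("1"),
	}
	newUsage := Add(used, delta)          // cpu=750m, pods=4
	rollback := Subtract(newUsage, delta) // back to cpu=500m, pods=3
	cpuOnly := Mask(newUsage, []api.ResourceName{api.ResourceCPU})
	fmt.Println(Equals(used, rollback), len(cpuOnly)) // prints: true 1
}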
|
223
pkg/quota/resources_test.go
Normal file
@ -0,0 +1,223 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package quota
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
)
|
||||
|
||||
func TestEquals(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
b api.ResourceList
|
||||
expected bool
|
||||
}{
|
||||
"isEqual": {
|
||||
a: api.ResourceList{},
|
||||
b: api.ResourceList{},
|
||||
expected: true,
|
||||
},
|
||||
"isEqualWithKeys": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
b: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
"isNotEqualSameKeys": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("200m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
b: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"isNotEqualDiffKeys": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
b: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
api.ResourcePods: resource.MustParse("1"),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
if result := Equals(testCase.a, testCase.b); result != testCase.expected {
|
||||
t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
b api.ResourceList
|
||||
expected api.ResourceList
|
||||
}{
|
||||
"noKeys": {
|
||||
a: api.ResourceList{},
|
||||
b: api.ResourceList{},
|
||||
expected: api.ResourceList{},
|
||||
},
|
||||
"toEmpty": {
|
||||
a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
b: api.ResourceList{},
|
||||
expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
"matching": {
|
||||
a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
expected: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
sum := Add(testCase.a, testCase.b)
|
||||
if result := Equals(testCase.expected, sum); !result {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubtract(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
b api.ResourceList
|
||||
expected api.ResourceList
|
||||
}{
|
||||
"noKeys": {
|
||||
a: api.ResourceList{},
|
||||
b: api.ResourceList{},
|
||||
expected: api.ResourceList{},
|
||||
},
|
||||
"value-empty": {
|
||||
a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
b: api.ResourceList{},
|
||||
expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
"empty-value": {
|
||||
a: api.ResourceList{},
|
||||
b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
expected: api.ResourceList{api.ResourceCPU: resource.MustParse("-100m")},
|
||||
},
|
||||
"value-value": {
|
||||
a: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
sub := Subtract(testCase.a, testCase.b)
|
||||
if result := Equals(testCase.expected, sub); !result {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceNames(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
expected []api.ResourceName
|
||||
}{
|
||||
"empty": {
|
||||
a: api.ResourceList{},
|
||||
expected: []api.ResourceName{},
|
||||
},
|
||||
"values": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU},
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
actualSet := ToSet(ResourceNames(testCase.a))
|
||||
expectedSet := ToSet(testCase.expected)
|
||||
if !actualSet.Equal(expectedSet) {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContains(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a []api.ResourceName
|
||||
b api.ResourceName
|
||||
expected bool
|
||||
}{
|
||||
"does-not-contain": {
|
||||
a: []api.ResourceName{api.ResourceMemory},
|
||||
b: api.ResourceCPU,
|
||||
expected: false,
|
||||
},
|
||||
"does-contain": {
|
||||
a: []api.ResourceName{api.ResourceMemory, api.ResourceCPU},
|
||||
b: api.ResourceCPU,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
if actual := Contains(testCase.a, testCase.b); actual != testCase.expected {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsZero(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
expected bool
|
||||
}{
|
||||
"empty": {
|
||||
a: api.ResourceList{},
|
||||
expected: true,
|
||||
},
|
||||
"zero": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("0"),
|
||||
api.ResourceMemory: resource.MustParse("0"),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
"non-zero": {
|
||||
a: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("200m"),
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
if result := IsZero(testCase.a); result != testCase.expected {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected)
|
||||
}
|
||||
}
|
||||
}
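The tests above stop at IsZero; purely as a hedged sketch of how Mask could be exercised in the same style within this test file (this test is not part of the commit):

func TestMaskSketch(t *testing.T) {
	a := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("100m"),
		api.ResourceMemory: resource.MustParse("1Gi"),
	}
	masked := Mask(a, []api.ResourceName{api.ResourceCPU})
	expected := api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}
	if !Equals(expected, masked) {
		t.Errorf("expected: %v, actual: %v", expected, masked)
	}
}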
|
@ -20,35 +20,61 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/golang-lru"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/quota/install"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilerrors "k8s.io/kubernetes/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("ResourceQuota", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewResourceQuota(client), nil
|
||||
admission.RegisterPlugin("ResourceQuota",
|
||||
func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
registry := install.NewRegistry(client)
|
||||
return NewResourceQuota(client, registry)
|
||||
})
|
||||
}
|
||||
|
||||
type quota struct {
|
||||
// quotaAdmission implements an admission controller that can enforce quota constraints
|
||||
type quotaAdmission struct {
|
||||
*admission.Handler
|
||||
// must be able to read/write ResourceQuota
|
||||
client clientset.Interface
|
||||
// indexer that holds quota objects by namespace
|
||||
indexer cache.Indexer
|
||||
// registry that knows how to measure usage for objects
|
||||
registry quota.Registry
|
||||
|
||||
// liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures.
// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
|
||||
// We track the lookup result here so that for repeated requests, we don't look it up very often.
|
||||
liveLookupCache *lru.Cache
|
||||
liveTTL time.Duration
|
||||
}
|
||||
|
||||
// NewResourceQuota creates a new resource quota admission control handler
|
||||
func NewResourceQuota(client clientset.Interface) admission.Interface {
|
||||
type liveLookupEntry struct {
|
||||
expiry time.Time
|
||||
items []*api.ResourceQuota
|
||||
}
|
||||
|
||||
// NewResourceQuota configures an admission controller that can enforce quota constraints
|
||||
// using the provided registry. The registry must have the capability to handle group/kinds that
|
||||
// are persisted by the server this admission controller is intercepting
|
||||
func NewResourceQuota(client clientset.Interface, registry quota.Registry) (admission.Interface, error) {
|
||||
liveLookupCache, err := lru.New(100)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lw := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return client.Core().ResourceQuotas(api.NamespaceAll).List(options)
|
||||
@ -59,213 +85,218 @@ func NewResourceQuota(client clientset.Interface) admission.Interface {
|
||||
}
|
||||
indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
|
||||
reflector.Run()
|
||||
return createResourceQuota(client, indexer)
|
||||
}
|
||||
|
||||
func createResourceQuota(client clientset.Interface, indexer cache.Indexer) admission.Interface {
|
||||
return "a{
|
||||
return "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: client,
|
||||
indexer: indexer,
|
||||
}
|
||||
registry: registry,
|
||||
liveLookupCache: liveLookupCache,
|
||||
liveTTL: time.Duration(30 * time.Second),
|
||||
}, nil
|
||||
}
|
||||
|
||||
var resourceToResourceName = map[unversioned.GroupResource]api.ResourceName{
|
||||
api.Resource("pods"): api.ResourcePods,
|
||||
api.Resource("services"): api.ResourceServices,
|
||||
api.Resource("replicationcontrollers"): api.ResourceReplicationControllers,
|
||||
api.Resource("resourcequotas"): api.ResourceQuotas,
|
||||
api.Resource("secrets"): api.ResourceSecrets,
|
||||
api.Resource("persistentvolumeclaims"): api.ResourcePersistentVolumeClaims,
|
||||
}
|
||||
|
||||
func (q *quota) Admit(a admission.Attributes) (err error) {
|
||||
// Admit makes admission decisions while enforcing quota
|
||||
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
|
||||
// ignore all operations that correspond to sub-resource actions
|
||||
if a.GetSubresource() != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if a.GetOperation() == "DELETE" {
|
||||
// if we do not know how to evaluate use for this kind, just ignore
|
||||
evaluators := q.registry.Evaluators()
|
||||
evaluator, found := evaluators[a.GetKind()]
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Namespace: a.GetNamespace(),
|
||||
Name: "",
|
||||
},
|
||||
// for this kind, check if the operation could mutate any quota resources
|
||||
// if no resources tracked by quota are impacted, then just return
|
||||
op := a.GetOperation()
|
||||
operationResources := evaluator.OperationResources(op)
|
||||
if len(operationResources) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// concurrent operations that modify quota tracked resources can cause a conflict when incrementing usage
|
||||
// as a result, we will attempt to increment quota usage per request up to numRetries limit
|
||||
// we fuzz each retry with an interval period to attempt to improve end-user experience during concurrent operations
|
||||
numRetries := 10
|
||||
interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond
|
||||
|
||||
items, err := q.indexer.Index("namespace", key)
|
||||
// determine if there are any quotas in this namespace
|
||||
// if there are no quotas, we don't need to do anything
|
||||
namespace, name := a.GetNamespace(), a.GetName()
|
||||
items, err := q.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
|
||||
if err != nil {
|
||||
return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there was an error enforcing quota", a.GetOperation(), a.GetResource()))
|
||||
return admission.NewForbidden(a, fmt.Errorf("Error resolving quota."))
|
||||
}
|
||||
// if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it.
|
||||
if len(items) == 0 {
|
||||
lruItemObj, ok := q.liveLookupCache.Get(a.GetNamespace())
|
||||
if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
|
||||
liveList, err := q.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return admission.NewForbidden(a, err)
|
||||
}
|
||||
newEntry := liveLookupEntry{expiry: time.Now().Add(q.liveTTL)}
|
||||
for i := range liveList.Items {
|
||||
newEntry.items = append(newEntry.items, &liveList.Items[i])
|
||||
}
|
||||
q.liveLookupCache.Add(a.GetNamespace(), newEntry)
|
||||
lruItemObj = newEntry
|
||||
}
|
||||
lruEntry := lruItemObj.(liveLookupEntry)
|
||||
for i := range lruEntry.items {
|
||||
items = append(items, lruEntry.items[i])
|
||||
}
|
||||
}
|
||||
// if there are still no items, we can return
|
||||
if len(items) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// find the set of quotas that are pertinent to this request
|
||||
// reject if we match the quota, but usage is not calculated yet
|
||||
// reject if the input object does not satisfy quota constraints
|
||||
// if there are no pertinent quotas, we can just return
|
||||
inputObject := a.GetObject()
|
||||
resourceQuotas := []*api.ResourceQuota{}
|
||||
for i := range items {
|
||||
|
||||
quota := items[i].(*api.ResourceQuota)
|
||||
|
||||
for retry := 1; retry <= numRetries; retry++ {

// we cannot modify the value directly in the cache, so we copy
status := &api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
resourceQuota := items[i].(*api.ResourceQuota)
match := evaluator.Matches(resourceQuota, inputObject)
if !match {
continue
}
for k, v := range quota.Status.Hard {
status.Hard[k] = *v.Copy()
}
for k, v := range quota.Status.Used {
status.Used[k] = *v.Copy()
}

dirty, err := IncrementUsage(a, status, q.client)
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
evaluatorResources := evaluator.MatchesResources()
requiredResources := quota.Intersection(hardResources, evaluatorResources)
err := evaluator.Constraints(requiredResources, inputObject)
if err != nil {
return admission.NewForbidden(a, err)
return admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
}
if !hasUsageStats(resourceQuota) {
return admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
}
resourceQuotas = append(resourceQuotas, resourceQuota)
}
if len(resourceQuotas) == 0 {
return nil
}

if dirty {
// construct a usage record
usage := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
Name: quota.Name,
Namespace: quota.Namespace,
ResourceVersion: quota.ResourceVersion,
Labels: quota.Labels,
Annotations: quota.Annotations},
// there is at least one quota that definitely matches our object
// as a result, we need to measure the usage of this object for quota
// on updates, we need to subtract the previous measured usage
// if usage shows no change, just return since it has no impact on quota
deltaUsage := evaluator.Usage(inputObject)
if admission.Update == op {
prevItem, err := evaluator.Get(namespace, name)
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Unable to get previous: %v", err))
}
prevUsage := evaluator.Usage(prevItem)
deltaUsage = quota.Subtract(deltaUsage, prevUsage)
}
if quota.IsZero(deltaUsage) {
return nil
}

// TODO: Move to a bucketing work queue
// If we guaranteed that we processed the request in order it was received to server, we would reduce quota conflicts.
// Until we have the bucketing work queue, we jitter requests and retry on conflict.
numRetries := 10
interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

// seed the retry loop with the initial set of quotas to process (should reduce each iteration)
resourceQuotasToProcess := resourceQuotas
for retry := 1; retry <= numRetries; retry++ {
// the list of quotas we will try again if there is a version conflict
tryAgain := []*api.ResourceQuota{}

// check that we pass all remaining quotas so we do not prematurely charge
// for each quota, mask the usage to the set of resources tracked by the quota
// if request + used > hard, return an error describing the failure
updatedUsage := map[string]api.ResourceList{}
for _, resourceQuota := range resourceQuotasToProcess {
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
requestedUsage := quota.Mask(deltaUsage, hardResources)
newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
if allowed, exceeded := quota.LessThanOrEqual(newUsage, resourceQuota.Status.Hard); !allowed {
failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
return admission.NewForbidden(a,
fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
resourceQuota.Name,
prettyPrint(failedRequestedUsage),
prettyPrint(failedUsed),
prettyPrint(failedHard)))
}
updatedUsage[resourceQuota.Name] = newUsage
}

// update the status for each quota with its new usage
// if we get a conflict, get updated quota, and enqueue
for i, resourceQuota := range resourceQuotasToProcess {
newUsage := updatedUsage[resourceQuota.Name]
quotaToUpdate := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
Name: resourceQuota.Name,
Namespace: resourceQuota.Namespace,
ResourceVersion: resourceQuota.ResourceVersion,
},
Status: api.ResourceQuotaStatus{
Hard: quota.Add(api.ResourceList{}, resourceQuota.Status.Hard),
Used: newUsage,
},
}
_, err = q.client.Core().ResourceQuotas(quotaToUpdate.Namespace).UpdateStatus(quotaToUpdate)
if err != nil {
if !errors.IsConflict(err) {
return admission.NewForbidden(a, fmt.Errorf("Unable to update quota status: %s %v", resourceQuota.Name, err))
}
// if we get a conflict, we get the latest copy of the quota documents that were not yet modified so we retry all with latest state.
for fetchIndex := i; fetchIndex < len(resourceQuotasToProcess); fetchIndex++ {
latestQuota, err := q.client.Core().ResourceQuotas(namespace).Get(resourceQuotasToProcess[fetchIndex].Name)
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Unable to get quota: %s %v", resourceQuotasToProcess[fetchIndex].Name, err))
}
tryAgain = append(tryAgain, latestQuota)
}
usage.Status = *status
_, err = q.client.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
if err == nil {
break
}
}

// all quotas were updated, so we can return
if len(tryAgain) == 0 {
return nil
}

// we have concurrent requests to update quota, so look to retry if needed
// next iteration, we need to process the items that have to try again
// pause the specified interval to encourage jitter
if retry == numRetries {
return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there are too many concurrent requests to increment quota", a.GetOperation(), a.GetResource()))
names := []string{}
for _, quota := range tryAgain {
names = append(names, quota.Name)
}
return admission.NewForbidden(a, fmt.Errorf("Unable to update status for quota: %s, ", strings.Join(names, ",")))
}
resourceQuotasToProcess = tryAgain
time.Sleep(interval)
// manually get the latest quota
quota, err = q.client.Core().ResourceQuotas(usage.Namespace).Get(quota.Name)
if err != nil {
return admission.NewForbidden(a, err)
}
}
}
}
return nil
}
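The loop above charges the request by masking the measured delta to the resources each quota tracks, adding it to the current usage, and rejecting the request if any tracked resource would exceed its hard limit. A minimal standalone sketch of that arithmetic, using plain int64 maps in place of resource.Quantity (mask, add, and lessThanOrEqual here are illustrative helpers, not the pkg/quota API):

package main

import "fmt"

// resourceList is a simplified stand-in for api.ResourceList, using int64
// values instead of resource.Quantity so the sketch is self-contained.
type resourceList map[string]int64

// mask keeps only the named resources, mirroring how the admission loop
// restricts the delta to the resources a quota tracks.
func mask(usage resourceList, names []string) resourceList {
	out := resourceList{}
	for _, n := range names {
		if v, ok := usage[n]; ok {
			out[n] = v
		}
	}
	return out
}

// add sums two resource lists, mirroring the charge of the request onto usage.
func add(a, b resourceList) resourceList {
	out := resourceList{}
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		out[k] += v
	}
	return out
}

// lessThanOrEqual reports whether newUsage fits under hard and names any
// exceeded resources; resources not named in hard are not limited here.
func lessThanOrEqual(newUsage, hard resourceList) (bool, []string) {
	exceeded := []string{}
	for k, v := range newUsage {
		if limit, ok := hard[k]; ok && v > limit {
			exceeded = append(exceeded, k)
		}
	}
	return len(exceeded) == 0, exceeded
}

func main() {
	hard := resourceList{"pods": 5, "cpu": 3000}                 // hard limits tracked by the quota
	used := resourceList{"pods": 3, "cpu": 1000}                 // current observed usage
	delta := resourceList{"pods": 1, "cpu": 100, "memory": 512}  // incoming request

	tracked := []string{"pods", "cpu"}
	requested := mask(delta, tracked) // drop resources the quota does not track
	newUsage := add(used, requested)  // charge the request against current usage
	if ok, exceeded := lessThanOrEqual(newUsage, hard); !ok {
		fmt.Println("request rejected, exceeded:", exceeded)
		return
	}
	fmt.Println("request admitted, new usage:", newUsage)
}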

// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
// Return true if the usage must be recorded prior to admitting the new resource
// Return an error if the operation should not pass admission control
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client clientset.Interface) (bool, error) {
// on update, the only resource that can modify the value of a quota is pods
// so if you're not a pod, we exit quickly
if a.GetOperation() == admission.Update && a.GetResource() != api.Resource("pods") {
return false, nil
// prettyPrint formats a resource list for usage in errors
func prettyPrint(item api.ResourceList) string {
parts := []string{}
for key, value := range item {
constraint := string(key) + "=" + value.String()
parts = append(parts, constraint)
}
return strings.Join(parts, ",")
}

var errs []error
dirty := true
set := map[api.ResourceName]bool{}
for k := range status.Hard {
set[k] = true
}
obj := a.GetObject()
// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
if a.GetOperation() == admission.Create {
resourceName := resourceToResourceName[a.GetResource()]
hard, hardFound := status.Hard[resourceName]
if hardFound {
used, usedFound := status.Used[resourceName]
if !usedFound {
return false, fmt.Errorf("quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
}
if used.Value() >= hard.Value() {
errs = append(errs, fmt.Errorf("limited to %s %s", hard.String(), resourceName))
dirty = false
} else {
status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
// hasUsageStats returns true if for each hard constraint there is a value for its current usage
func hasUsageStats(resourceQuota *api.ResourceQuota) bool {
for resourceName := range resourceQuota.Status.Hard {
if _, found := resourceQuota.Status.Used[resourceName]; !found {
return false
}
}
}

if a.GetResource() == api.Resource("pods") {
for _, resourceName := range []api.ResourceName{api.ResourceMemory, api.ResourceCPU} {

// ignore tracking the resource if it's not in the quota document
if !set[resourceName] {
continue
}

hard, hardFound := status.Hard[resourceName]
if !hardFound {
continue
}

// if we do not yet know how much of the current resource is used, we cannot accept any request
used, usedFound := status.Used[resourceName]
if !usedFound {
return false, fmt.Errorf("unable to admit pod until quota usage stats are calculated.")
}

// the amount of resource being requested, or an error if it does not make a request that is tracked
pod := obj.(*api.Pod)
delta, err := resourcequotacontroller.PodRequests(pod, resourceName)

if err != nil {
return false, fmt.Errorf("%s is limited by quota, must make explicit request.", resourceName)
}

// if this operation is an update, we need to find the delta usage from the previous state
if a.GetOperation() == admission.Update {
oldPod, err := client.Core().Pods(a.GetNamespace()).Get(pod.Name)
if err != nil {
return false, err
}

// if the previous version of the resource made a resource request, we need to subtract the old request
// from the current to get the actual resource request delta. if the previous version of the pod
// made no request on the resource, then we get an err value. we ignore the err value, and delta
// will just be equal to the total resource request on the pod since there is nothing to subtract.
oldRequest, err := resourcequotacontroller.PodRequests(oldPod, resourceName)
if err == nil {
err = delta.Sub(*oldRequest)
if err != nil {
return false, err
}
}
}

newUsage := used.Copy()
newUsage.Add(*delta)

// make the most precise comparison possible
newUsageValue := newUsage.Value()
hardUsageValue := hard.Value()
if newUsageValue <= resource.MaxMilliValue && hardUsageValue <= resource.MaxMilliValue {
newUsageValue = newUsage.MilliValue()
hardUsageValue = hard.MilliValue()
}

if newUsageValue > hardUsageValue {
errs = append(errs, fmt.Errorf("%s quota is %s, current usage is %s, requesting %s.", resourceName, hard.String(), used.String(), delta.String()))
dirty = false
} else {
status.Used[resourceName] = *newUsage
}

}
}

return dirty, utilerrors.NewAggregate(errs)
return true
}
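The CPU/memory branch above compares usage at milli precision whenever both quantities are small enough that the milli form cannot overflow. A simplified sketch of that trade-off with plain integers; maxMilli and usageExceedsHard are illustrative names, and the constant is only assumed to match resource.MaxMilliValue:

package main

import "fmt"

// maxMilli is an illustrative stand-in for resource.MaxMilliValue: the largest
// whole-unit value whose milli-unit form still fits in an int64.
const maxMilli = int64(((1 << 63) - 1) / 1000)

// usageExceedsHard mirrors the "most precise comparison possible" step above.
// Quantities are passed both in whole units and in milli-units (as
// resource.Quantity exposes via Value and MilliValue); the milli form is used
// only when it cannot overflow, so sub-unit requests are not rounded away.
func usageExceedsHard(newWhole, hardWhole, newMilli, hardMilli int64) bool {
	newValue, hardValue := newWhole, hardWhole
	if newWhole <= maxMilli && hardWhole <= maxMilli {
		newValue, hardValue = newMilli, hardMilli
	}
	return newValue > hardValue
}

func main() {
	fmt.Println(usageExceedsHard(3, 3, 2500, 3000)) // false: 2.5 cores fit under a 3 core limit
	fmt.Println(usageExceedsHard(4, 3, 3100, 3000)) // true: 3.1 cores exceed it
}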
@ -18,16 +18,20 @@ package resourcequota

import (
"strconv"
"strings"
"testing"
"time"

"github.com/hashicorp/golang-lru"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/util/sets"
)

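The tests in this file repeatedly build the admission handler from a fake clientset seeded with a quota, a namespace indexer acting as the local cache, and the generic quota registry. A condensed sketch of that setup, reusing this file's imports; the quotaAdmission struct literal and its fields follow the test code below, but treat the helper itself as illustrative rather than part of the plugin's API:

// newTestHandler is an illustrative helper (not in the source) that captures
// the setup pattern repeated by the tests below: a fake client, a namespace
// indexer, and the quota registry wired into the admission plugin struct.
func newTestHandler(resourceQuota *api.ResourceQuota) *quotaAdmission {
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	return handler
}
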
func getResourceList(cpu, memory string) api.ResourceList {
|
||||
@ -63,386 +67,502 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
|
||||
return pod
|
||||
}
|
||||
|
||||
// TestAdmissionIgnoresDelete verifies that the admission controller ignores delete operations
|
||||
func TestAdmissionIgnoresDelete(t *testing.T) {
|
||||
kubeClient := fake.NewSimpleClientset()
|
||||
handler, err := NewResourceQuota(kubeClient, install.NewRegistry(kubeClient))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
namespace := "default"
|
||||
handler := createResourceQuota(&fake.Clientset{}, nil)
|
||||
err := handler.Admit(admission.NewAttributesRecord(nil, api.Kind("Pod"), namespace, "name", api.Resource("pods"), "", admission.Delete, nil))
|
||||
err = handler.Admit(admission.NewAttributesRecord(nil, api.Kind("Pod"), namespace, "name", api.Resource("pods"), "", admission.Delete, nil))
|
||||
if err != nil {
|
||||
t.Errorf("ResourceQuota should admit all deletes: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources
|
||||
// It verifies that creation of a pod that would have exceeded quota is properly failed
|
||||
// It verifies that create operations to a subresource that would have exceeded quota would succeed
|
||||
func TestAdmissionIgnoresSubresources(t *testing.T) {
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := createResourceQuota(&fake.Clientset{}, indexer)
|
||||
|
||||
quota := &api.ResourceQuota{}
|
||||
quota.Name = "quota"
|
||||
quota.Namespace = "test"
|
||||
quota.Status = api.ResourceQuotaStatus{
|
||||
resourceQuota := &api.ResourceQuota{}
|
||||
resourceQuota.Name = "quota"
|
||||
resourceQuota.Namespace = "test"
|
||||
resourceQuota.Status = api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
|
||||
quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
|
||||
|
||||
indexer.Add(quota)
|
||||
|
||||
resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
|
||||
resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err == nil {
|
||||
t.Errorf("Expected an error because the pod exceeded allowed quota")
|
||||
}
|
||||
|
||||
err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "subresource", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestIncrementUsagePodResources(t *testing.T) {
|
||||
type testCase struct {
|
||||
testName string
|
||||
existing *api.Pod
|
||||
input *api.Pod
|
||||
resourceName api.ResourceName
|
||||
hard resource.Quantity
|
||||
expectedUsage resource.Quantity
|
||||
expectedError bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
testName: "memory-allowed",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
hard: resource.MustParse("500Mi"),
|
||||
expectedUsage: resource.MustParse("200Mi"),
|
||||
expectedError: false,
|
||||
// TestAdmitBelowQuotaLimit verifies that a pod when created has its usage reflected on the quota
|
||||
func TestAdmitBelowQuotaLimit(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
{
|
||||
testName: "memory-not-allowed",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("", "450Mi"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
hard: resource.MustParse("500Mi"),
|
||||
expectedError: true,
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
{
|
||||
testName: "memory-not-allowed-with-different-format",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100M"), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("", "450Mi"), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
hard: resource.MustParse("500Mi"),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
testName: "memory-no-request",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceMemory,
|
||||
hard: resource.MustParse("500Mi"),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
testName: "cpu-allowed",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
hard: resource.MustParse("2"),
|
||||
expectedUsage: resource.MustParse("2"),
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
testName: "cpu-not-allowed",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("600m", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
hard: resource.MustParse("1500m"),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
testName: "cpu-no-request",
|
||||
existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
|
||||
input: validPod("b", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
|
||||
resourceName: api.ResourceCPU,
|
||||
hard: resource.MustParse("1500m"),
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
for _, item := range testCases {
|
||||
podList := &api.PodList{Items: []api.Pod{*item.existing}}
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
used, err := resourcequotacontroller.PodRequests(item.existing, item.resourceName)
|
||||
if err != nil {
|
||||
t.Errorf("Test %s, unexpected error %v", item.testName, err)
|
||||
}
|
||||
status.Hard[item.resourceName] = item.hard
|
||||
status.Used[item.resourceName] = *used
|
||||
|
||||
dirty, err := IncrementUsage(admission.NewAttributesRecord(item.input, api.Kind("Pod"), item.input.Namespace, item.input.Name, api.Resource("pods"), "", admission.Create, nil), status, client)
|
||||
if err == nil && item.expectedError {
|
||||
t.Errorf("Test %s, expected error", item.testName)
|
||||
}
|
||||
if err != nil && !item.expectedError {
|
||||
t.Errorf("Test %s, unexpected error", err)
|
||||
}
|
||||
if !item.expectedError {
|
||||
if !dirty {
|
||||
t.Errorf("Test %s, expected the quota to be dirty", item.testName)
|
||||
}
|
||||
quantity := status.Used[item.resourceName]
|
||||
if quantity.String() != item.expectedUsage.String() {
|
||||
t.Errorf("Test %s, expected usage %s, actual usage %s", item.testName, item.expectedUsage.String(), quantity.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementUsagePods(t *testing.T) {
|
||||
pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
|
||||
podList := &api.PodList{Items: []api.Pod{*pod}}
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourcePods
|
||||
status.Hard[r] = resource.MustParse("2")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, api.Kind("Pod"), pod.Namespace, "new-pod", api.Resource("pods"), "", admission.Create, nil), status, client)
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !dirty {
|
||||
t.Errorf("Expected the status to get incremented, therefore should have been dirty")
|
||||
if len(kubeClient.Actions()) == 0 {
|
||||
t.Errorf("Expected a client action")
|
||||
}
|
||||
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
|
||||
lastActionIndex := len(kubeClient.Actions()) - 1
|
||||
usage := kubeClient.Actions()[lastActionIndex].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
expectedUsage := api.ResourceQuota{
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1100m"),
|
||||
api.ResourceMemory: resource.MustParse("52Gi"),
|
||||
api.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
},
|
||||
}
|
||||
for k, v := range expectedUsage.Status.Used {
|
||||
actual := usage.Status.Used[k]
|
||||
actualValue := actual.String()
|
||||
expectedValue := v.String()
|
||||
if expectedValue != actualValue {
|
||||
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
|
||||
}
|
||||
quantity := status.Used[r]
|
||||
if quantity.Value() != int64(2) {
|
||||
t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestExceedUsagePods(t *testing.T) {
|
||||
pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
|
||||
podList := &api.PodList{Items: []api.Pod{*pod}}
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
// TestAdmitExceedQuotaLimit verifies that if a pod exceeded allowed usage that its rejected during admission.
|
||||
func TestAdmitExceedQuotaLimit(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
r := api.ResourcePods
|
||||
status.Hard[r] = resource.MustParse("1")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
_, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, api.Kind("Pod"), pod.Namespace, "name", api.Resource("pods"), "", admission.Create, nil), status, client)
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err == nil {
|
||||
t.Errorf("Expected error because this would exceed your quota")
|
||||
t.Errorf("Expected an error exceeding quota")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementUsageServices(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.ServiceList{
|
||||
Items: []api.Service{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
// TestAdmitEnforceQuotaConstraints verifies that if a quota tracks a particular resource that that resource is
|
||||
// specified on the pod. In this case, we create a quota that tracks cpu request, memory request, and memory limit.
|
||||
// We ensure that a pod that does not specify a memory limit that it fails in admission.
|
||||
func TestAdmitEnforceQuotaConstraints(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourceLimitsMemory: resource.MustParse("200Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourceLimitsMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourceServices
|
||||
status.Hard[r] = resource.MustParse("2")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.Service{}, api.Kind("Service"), namespace, "name", api.Resource("services"), "", admission.Create, nil), status, client)
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err == nil {
|
||||
t.Errorf("Expected an error because the pod does not specify a memory limit")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in
|
||||
func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourceLimitsMemory: resource.MustParse("200Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourceLimitsMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
liveLookupCache, err := lru.New(100)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
liveLookupCache: liveLookupCache,
|
||||
}
|
||||
// Add to the index
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
|
||||
// Add to the lru cache so we do not do a live client lookup
|
||||
liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: []*api.ResourceQuota{}})
|
||||
err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Did not expect an error because the pod is in a different namespace than the quota")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmitBelowTerminatingQuotaLimit ensures that terminating pods are charged to the right quota.
|
||||
// It creates a terminating and non-terminating quota, and creates a terminating pod.
|
||||
// It ensures that the terminating quota is incremented, and the non-terminating quota is not.
|
||||
func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
|
||||
resourceQuotaNonTerminating := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
resourceQuotaTerminating := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1"),
|
||||
api.ResourceMemory: resource.MustParse("50Gi"),
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuotaTerminating, resourceQuotaNonTerminating)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuotaNonTerminating)
|
||||
handler.indexer.Add(resourceQuotaTerminating)
|
||||
|
||||
// create a pod that has an active deadline
|
||||
newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
|
||||
activeDeadlineSeconds := int64(30)
|
||||
newPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !dirty {
|
||||
t.Errorf("Expected the status to get incremented, therefore should have been dirty")
|
||||
if len(kubeClient.Actions()) == 0 {
|
||||
t.Errorf("Expected a client action")
|
||||
}
|
||||
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
|
||||
lastActionIndex := len(kubeClient.Actions()) - 1
|
||||
usage := kubeClient.Actions()[lastActionIndex].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
|
||||
// ensure only the quota-terminating was updated
|
||||
if usage.Name != resourceQuotaTerminating.Name {
|
||||
t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaTerminating.Name, usage.Name)
|
||||
}
|
||||
|
||||
expectedUsage := api.ResourceQuota{
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
api.ResourceMemory: resource.MustParse("100Gi"),
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("1100m"),
|
||||
api.ResourceMemory: resource.MustParse("52Gi"),
|
||||
api.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
},
|
||||
}
|
||||
for k, v := range expectedUsage.Status.Used {
|
||||
actual := usage.Status.Used[k]
|
||||
actualValue := actual.String()
|
||||
expectedValue := v.String()
|
||||
if expectedValue != actualValue {
|
||||
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
|
||||
}
|
||||
quantity := status.Used[r]
|
||||
if quantity.Value() != int64(2) {
|
||||
t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestExceedUsageServices(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.ServiceList{
|
||||
Items: []api.Service{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
// TestAdmitBelowBestEffortQuotaLimit creates a best effort and non-best effort quota.
|
||||
// It verifies that best effort pods are properly scoped to the best effort quota document.
|
||||
func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) {
|
||||
resourceQuotaBestEffort := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourceServices
|
||||
status.Hard[r] = resource.MustParse("1")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
_, err := IncrementUsage(admission.NewAttributesRecord(&api.Service{}, api.Kind("Service"), namespace, "name", api.Resource("services"), "", admission.Create, nil), status, client)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error because this would exceed usage")
|
||||
resourceQuotaNotBestEffort := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuotaBestEffort, resourceQuotaNotBestEffort)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuotaBestEffort)
|
||||
handler.indexer.Add(resourceQuotaNotBestEffort)
|
||||
|
||||
func TestIncrementUsageReplicationControllers(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourceReplicationControllers
|
||||
status.Hard[r] = resource.MustParse("2")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, api.Kind("ReplicationController"), namespace, "name", api.Resource("replicationcontrollers"), "", admission.Create, nil), status, client)
|
||||
// create a pod that is best effort because it does not make a request for anything
|
||||
newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !dirty {
|
||||
t.Errorf("Expected the status to get incremented, therefore should have been dirty")
|
||||
expectedActionSet := sets.NewString(
|
||||
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
|
||||
)
|
||||
actionSet := sets.NewString()
|
||||
for _, action := range kubeClient.Actions() {
|
||||
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
|
||||
}
|
||||
if !actionSet.HasAll(expectedActionSet.List()...) {
|
||||
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
|
||||
}
|
||||
lastActionIndex := len(kubeClient.Actions()) - 1
|
||||
usage := kubeClient.Actions()[lastActionIndex].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
|
||||
|
||||
if usage.Name != resourceQuotaBestEffort.Name {
|
||||
t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaBestEffort.Name, usage.Name)
|
||||
}
|
||||
|
||||
expectedUsage := api.ResourceQuota{
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
},
|
||||
}
|
||||
for k, v := range expectedUsage.Status.Used {
|
||||
actual := usage.Status.Used[k]
|
||||
actualValue := actual.String()
|
||||
expectedValue := v.String()
|
||||
if expectedValue != actualValue {
|
||||
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
|
||||
}
|
||||
quantity := status.Used[r]
|
||||
if quantity.Value() != int64(2) {
|
||||
t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestExceedUsageReplicationControllers(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
// TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort quota does not match a resource
|
||||
// guaranteed pod.
|
||||
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourceReplicationControllers
|
||||
status.Hard[r] = resource.MustParse("1")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
_, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, api.Kind("ReplicationController"), namespace, "name", api.Resource("replicationcontrollers"), "", admission.Create, nil), status, client)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error for exceeding hard limits")
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := "aAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: kubeClient,
|
||||
indexer: indexer,
|
||||
registry: install.NewRegistry(kubeClient),
|
||||
}
|
||||
handler.indexer.Add(resourceQuota)
|
||||
newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
|
||||
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if len(kubeClient.Actions()) != 0 {
|
||||
t.Errorf("Expected no client actions because the incoming pod did not match best effort quota")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExceedUsageSecrets(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.SecretList{
|
||||
Items: []api.Secret{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourceSecrets
|
||||
status.Hard[r] = resource.MustParse("1")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
_, err := IncrementUsage(admission.NewAttributesRecord(&api.Secret{}, api.Kind("Secret"), namespace, "name", api.Resource("secrets"), "", admission.Create, nil), status, client)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error for exceeding hard limits")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExceedUsagePersistentVolumeClaims(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := fake.NewSimpleClientset(&api.PersistentVolumeClaimList{
|
||||
Items: []api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
},
|
||||
},
|
||||
})
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
}
|
||||
r := api.ResourcePersistentVolumeClaims
|
||||
status.Hard[r] = resource.MustParse("1")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
_, err := IncrementUsage(admission.NewAttributesRecord(&api.PersistentVolumeClaim{}, api.Kind("PersistentVolumeClaim"), namespace, "name", api.Resource("persistentvolumeclaims"), "", admission.Create, nil), status, client)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error for exceeding hard limits")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementUsageOnUpdateIgnoresNonPodResources(t *testing.T) {
|
||||
testCase := []struct {
|
||||
kind unversioned.GroupKind
|
||||
resource unversioned.GroupResource
|
||||
subresource string
|
||||
object runtime.Object
|
||||
func TestHasUsageStats(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceQuota
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
kind: api.Kind("Service"),
|
||||
resource: api.Resource("services"),
|
||||
object: &api.Service{},
|
||||
"empty": {
|
||||
a: api.ResourceQuota{Status: api.ResourceQuotaStatus{Hard: api.ResourceList{}}},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
kind: api.Kind("ReplicationController"),
|
||||
resource: api.Resource("replicationcontrollers"),
|
||||
object: &api.ReplicationController{},
|
||||
"hard-only": {
|
||||
a: api.ResourceQuota{
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
{
|
||||
kind: api.Kind("ResourceQuota"),
|
||||
resource: api.Resource("resourcequotas"),
|
||||
object: &api.ResourceQuota{},
|
||||
},
|
||||
{
|
||||
kind: api.Kind("Secret"),
|
||||
resource: api.Resource("secrets"),
|
||||
object: &api.Secret{},
|
||||
},
|
||||
{
|
||||
kind: api.Kind("PersistentVolumeClaim"),
|
||||
resource: api.Resource("persistentvolumeclaims"),
|
||||
object: &api.PersistentVolumeClaim{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCase {
|
||||
client := fake.NewSimpleClientset()
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"hard-used": {
|
||||
a: api.ResourceQuota{
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("500Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
r := resourceToResourceName[testCase.resource]
|
||||
status.Hard[r] = resource.MustParse("2")
|
||||
status.Used[r] = resource.MustParse("1")
|
||||
|
||||
attributesRecord := admission.NewAttributesRecord(testCase.object, testCase.kind, "my-ns", "new-thing",
|
||||
testCase.resource, testCase.subresource, admission.Update, nil)
|
||||
dirty, err := IncrementUsage(attributesRecord, status, client)
|
||||
if err != nil {
|
||||
t.Errorf("Increment usage of resource %v had unexpected error: %v", testCase.resource, err)
|
||||
}
|
||||
if dirty {
|
||||
t.Errorf("Increment usage of resource %v should not result in a dirty quota on update", testCase.resource)
|
||||
for testName, testCase := range testCases {
|
||||
if result := hasUsageStats(&testCase.a); result != testCase.expected {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
417
test/e2e/resource_quota.go
Normal file
@ -0,0 +1,417 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// how long to wait for a resource quota update to occur
|
||||
resourceQuotaTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
var _ = Describe("ResourceQuota", func() {
|
||||
f := NewFramework("resourcequota")
|
||||
|
||||
It("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
|
||||
By("Creating a ResourceQuota")
|
||||
quotaName := "test-quota"
|
||||
resourceQuota := newTestResourceQuota(quotaName)
|
||||
resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status is calculated")
|
||||
usedResources := api.ResourceList{}
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should create a ResourceQuota and capture the life of a service.", func() {
|
||||
By("Creating a ResourceQuota")
|
||||
quotaName := "test-quota"
|
||||
resourceQuota := newTestResourceQuota(quotaName)
|
||||
resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status is calculated")
|
||||
usedResources := api.ResourceList{}
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a Service")
|
||||
service := newTestServiceForQuota("test-service")
|
||||
service, err = f.Client.Services(f.Namespace.Name).Create(service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status captures service creation")
|
||||
usedResources = api.ResourceList{}
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
usedResources[api.ResourceServices] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting a Service")
|
||||
err = f.Client.Services(f.Namespace.Name).Delete(service.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released usage")
|
||||
usedResources[api.ResourceServices] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should create a ResourceQuota and capture the life of a pod.", func() {
|
||||
By("Creating a ResourceQuota")
|
||||
quotaName := "test-quota"
|
||||
resourceQuota := newTestResourceQuota(quotaName)
|
||||
resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status is calculated")
|
||||
usedResources := api.ResourceList{}
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a Pod that fits quota")
|
||||
podName := "test-pod"
|
||||
requests := api.ResourceList{}
|
||||
requests[api.ResourceCPU] = resource.MustParse("500m")
|
||||
requests[api.ResourceMemory] = resource.MustParse("252Mi")
|
||||
pod := newTestPodForQuota(podName, requests, api.ResourceList{})
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring ResourceQuota status captures the pod usage")
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("1")
|
||||
usedResources[api.ResourceCPU] = requests[api.ResourceCPU]
|
||||
usedResources[api.ResourceMemory] = requests[api.ResourceMemory]
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Not allowing a pod to be created that exceeds remaining quota")
|
||||
requests = api.ResourceList{}
|
||||
requests[api.ResourceCPU] = resource.MustParse("600m")
|
||||
requests[api.ResourceMemory] = resource.MustParse("100Mi")
|
||||
pod = newTestPodForQuota("fail-pod", requests, api.ResourceList{})
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("Deleting the pod")
|
||||
err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released the pod usage")
|
||||
usedResources[api.ResourceQuotas] = resource.MustParse("1")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
usedResources[api.ResourceCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceMemory] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should verify ResourceQuota with terminating scopes.", func() {
|
||||
By("Creating a ResourceQuota with terminating scope")
|
||||
quotaTerminatingName := "quota-terminating"
|
||||
resourceQuotaTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring ResourceQuota status is calculated")
|
||||
usedResources := api.ResourceList{}
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a ResourceQuota with not terminating scope")
|
||||
quotaNotTerminatingName := "quota-not-terminating"
|
||||
resourceQuotaNotTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring ResourceQuota status is calculated")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a long running pod")
|
||||
podName := "test-pod"
|
||||
requests := api.ResourceList{}
|
||||
requests[api.ResourceCPU] = resource.MustParse("500m")
|
||||
requests[api.ResourceMemory] = resource.MustParse("200Mi")
|
||||
limits := api.ResourceList{}
|
||||
limits[api.ResourceCPU] = resource.MustParse("1")
|
||||
limits[api.ResourceMemory] = resource.MustParse("400Mi")
|
||||
pod := newTestPodForQuota(podName, requests, limits)
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with not terminating scope captures the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("1")
|
||||
usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU]
|
||||
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
|
||||
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
|
||||
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with terminating scope ignored the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the pod")
|
||||
err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a terminating pod")
|
||||
podName = "terminating-pod"
|
||||
pod = newTestPodForQuota(podName, requests, limits)
|
||||
activeDeadlineSeconds := int64(3600)
|
||||
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with terminating scope captures the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("1")
|
||||
usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU]
|
||||
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
|
||||
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
|
||||
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with not terminating scope ignored the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the pod")
|
||||
err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
|
||||
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should verify ResourceQuota with best effort scope.", func() {
|
||||
By("Creating a ResourceQuota with best effort scope")
|
||||
resourceQuotaBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring ResourceQuota status is calculated")
|
||||
usedResources := api.ResourceList{}
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a ResourceQuota with not best effort scope")
|
||||
resourceQuotaNotBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring ResourceQuota status is calculated")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a best-effort pod")
|
||||
pod := newTestPodForQuota(podName, api.ResourceList{}, api.ResourceList{})
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with best effort scope captures the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with not best effort ignored the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the pod")
|
||||
err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a not best-effort pod")
|
||||
requests := api.ResourceList{}
|
||||
requests[api.ResourceCPU] = resource.MustParse("500m")
|
||||
requests[api.ResourceMemory] = resource.MustParse("200Mi")
|
||||
limits := api.ResourceList{}
|
||||
limits[api.ResourceCPU] = resource.MustParse("1")
|
||||
limits[api.ResourceMemory] = resource.MustParse("400Mi")
|
||||
pod = newTestPodForQuota("burstable-pod", requests, limits)
|
||||
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with not best effort scope captures the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("1")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota with best effort scope ignored the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the pod")
|
||||
err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring resource quota status released the pod usage")
|
||||
usedResources[api.ResourcePods] = resource.MustParse("0")
|
||||
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
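For orientation, the matching rules these two specs exercise can be summarized in a short sketch. The helper below is illustrative only and not part of this change; podMatchesScope is a hypothetical name, and it simplifies what the quota evaluator actually does: Terminating matches pods with spec.activeDeadlineSeconds set, NotTerminating matches pods without it, BestEffort matches pods whose containers declare no requests or limits, and NotBestEffort matches the rest.

// Illustrative sketch only (hypothetical helper, simplified from the quota
// evaluator's scope matching): decide whether a pod is tracked by a scope.
func podMatchesScope(pod *api.Pod, scope api.ResourceQuotaScope) bool {
    // a pod is "terminating" when it has an active deadline set
    terminating := pod.Spec.ActiveDeadlineSeconds != nil
    // a pod is "best effort" when no container declares requests or limits
    bestEffort := true
    for _, c := range pod.Spec.Containers {
        if len(c.Resources.Requests) != 0 || len(c.Resources.Limits) != 0 {
            bestEffort = false
        }
    }
    switch scope {
    case api.ResourceQuotaScopeTerminating:
        return terminating
    case api.ResourceQuotaScopeNotTerminating:
        return !terminating
    case api.ResourceQuotaScopeBestEffort:
        return bestEffort
    case api.ResourceQuotaScopeNotBestEffort:
        return !bestEffort
    }
    return false
}
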
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
func newTestResourceQuotaWithScope(name string, scope api.ResourceQuotaScope) *api.ResourceQuota {
    hard := api.ResourceList{}
    hard[api.ResourcePods] = resource.MustParse("5")
    switch scope {
    case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating:
        hard[api.ResourceRequestsCPU] = resource.MustParse("1")
        hard[api.ResourceRequestsMemory] = resource.MustParse("500Mi")
        hard[api.ResourceLimitsCPU] = resource.MustParse("2")
        hard[api.ResourceLimitsMemory] = resource.MustParse("1Gi")
    }
    return &api.ResourceQuota{
        ObjectMeta: api.ObjectMeta{Name: name},
        Spec:       api.ResourceQuotaSpec{Hard: hard, Scopes: []api.ResourceQuotaScope{scope}},
    }
}

// newTestResourceQuota returns a quota that enforces default constraints for testing
func newTestResourceQuota(name string) *api.ResourceQuota {
    hard := api.ResourceList{}
    hard[api.ResourcePods] = resource.MustParse("5")
    hard[api.ResourceServices] = resource.MustParse("10")
    hard[api.ResourceReplicationControllers] = resource.MustParse("10")
    hard[api.ResourceQuotas] = resource.MustParse("1")
    hard[api.ResourceCPU] = resource.MustParse("1")
    hard[api.ResourceMemory] = resource.MustParse("500Mi")
    return &api.ResourceQuota{
        ObjectMeta: api.ObjectMeta{Name: name},
        Spec:       api.ResourceQuotaSpec{Hard: hard},
    }
}

// newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
    return &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: name,
        },
        Spec: api.PodSpec{
            Containers: []api.Container{
                {
                    Name:  "nginx",
                    Image: "gcr.io/google_containers/pause:2.0",
                    Resources: api.ResourceRequirements{
                        Requests: requests,
                        Limits:   limits,
                    },
                },
            },
        },
    }
}

// newTestServiceForQuota returns a simple service
func newTestServiceForQuota(name string) *api.Service {
    return &api.Service{
        ObjectMeta: api.ObjectMeta{
            Name: name,
        },
        Spec: api.ServiceSpec{
            Ports: []api.ServicePort{{
                Port:       80,
                TargetPort: intstr.FromInt(80),
            }},
        },
    }
}

// createResourceQuota creates the given quota in the specified namespace
func createResourceQuota(c *client.Client, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
    return c.ResourceQuotas(namespace).Create(resourceQuota)
}

// deleteResourceQuota deletes the quota with the specified name
func deleteResourceQuota(c *client.Client, namespace, name string) error {
    return c.ResourceQuotas(namespace).Delete(name)
}

// waitForResourceQuota waits for the quota status to show the expected used resources value
func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error {
    return wait.Poll(poll, resourceQuotaTimeout, func() (bool, error) {
        resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName)
        if err != nil {
            return false, err
        }
        // used may not yet be calculated
        if resourceQuota.Status.Used == nil {
            return false, nil
        }
        // verify that the quota shows the expected used resource values
        for k, v := range used {
            if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
                Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
                return false, nil
            }
        }
        return true, nil
    })
}
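
As a quick reference for how this polling helper is driven, here is a minimal usage sketch. It assumes the framework client and namespace used throughout these specs, a quota named "quota-terminating" created earlier, and the poll interval and resourceQuotaTimeout constants defined elsewhere in this file.

// Minimal usage sketch (assumptions noted above): block until the quota's
// Status.Used reports zero pods, or fail the spec on timeout or error.
expected := api.ResourceList{}
expected[api.ResourcePods] = resource.MustParse("0")
err := waitForResourceQuota(f.Client, f.Namespace.Name, "quota-terminating", expected)
Expect(err).NotTo(HaveOccurred())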