Merge pull request #12600 from derekwaynecarr/resource_quota_request
Update ResourceQuota for resource requirements requests
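This change moves ResourceQuota usage accounting for CPU and memory from container limits (the old PodCPU/PodMemory helpers) to container requests (the new PodsRequests helper): a pod now counts toward usage only when every one of its containers carries a non-zero request, and pods without explicit requests are logged and skipped. The sketch below illustrates that behavioral difference; it is not code from this commit and uses simplified stand-in types (plain int64 milli-CPU) instead of api.Pod and resource.Quantity.

package main

import "fmt"

// container and pod are simplified stand-ins for api.Container and api.Pod.
type container struct {
    requestMilliCPU int64 // 0 means "no explicit request"
    limitMilliCPU   int64
}

type pod struct {
    name       string
    containers []container
}

// podCPULimit mirrors the old accounting: sum the CPU limits of all containers.
func podCPULimit(p pod) int64 {
    var sum int64
    for _, c := range p.containers {
        sum += c.limitMilliCPU
    }
    return sum
}

// podCPURequest mirrors the new accounting: sum the CPU requests, but only if
// every container has a non-zero request (the PodHasRequests rule).
func podCPURequest(p pod) (int64, bool) {
    var sum int64
    for _, c := range p.containers {
        if c.requestMilliCPU == 0 {
            return 0, false
        }
        sum += c.requestMilliCPU
    }
    return sum, true
}

func main() {
    pods := []pod{
        {name: "with-request", containers: []container{{requestMilliCPU: 100, limitMilliCPU: 200}}},
        {name: "limit-only", containers: []container{{limitMilliCPU: 500}}},
    }
    var oldUsed, newUsed int64
    for _, p := range pods {
        oldUsed += podCPULimit(p)
        if req, ok := podCPURequest(p); ok {
            newUsed += req
        } else {
            fmt.Printf("no explicit cpu request for pod %s, not counted\n", p.name)
        }
    }
    fmt.Printf("used cpu, limits-based (old): %dm\n", oldUsed)   // 700m
    fmt.Printf("used cpu, requests-based (new): %dm\n", newUsed) // 100m
}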
@@ -17,6 +17,7 @@ limitations under the License.
package resourcequotacontroller

import (
    "fmt"
    "time"

    "github.com/golang/glog"
@@ -163,18 +164,6 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
    switch k {
    case api.ResourcePods:
        value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
    case api.ResourceMemory:
        val := int64(0)
        for _, pod := range filteredPods {
            val = val + PodMemory(pod).Value()
        }
        value = resource.NewQuantity(int64(val), resource.DecimalSI)
    case api.ResourceCPU:
        val := int64(0)
        for _, pod := range filteredPods {
            val = val + PodCPU(pod).MilliValue()
        }
        value = resource.NewMilliQuantity(int64(val), resource.DecimalSI)
    case api.ResourceServices:
        items, err := rm.kubeClient.Services(usage.Namespace).List(labels.Everything())
        if err != nil {
@@ -205,6 +194,10 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
            return err
        }
        value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
    case api.ResourceMemory:
        value = PodsRequests(filteredPods, api.ResourceMemory)
    case api.ResourceCPU:
        value = PodsRequests(filteredPods, api.ResourceCPU)
    }

    // ignore fields we do not understand (assume another controller is tracking it)
@@ -224,7 +217,73 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
    return nil
}

// PodCPU computes total cpu usage of a pod
// PodsRequests returns sum of each resource request for each pod in list
// If a given pod in the list does not have a request for the named resource, we log the error
// but still attempt to get the most representative count
func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity {
    var sum *resource.Quantity
    for i := range pods {
        pod := pods[i]
        podQuantity, err := PodRequests(pod, resourceName)
        if err != nil {
            // log the error, but try to keep the most accurate count possible in log
            // rationale here is that you may have had pods in a namespace that did not have
            // explicit requests prior to adding the quota
            glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName)
        } else {
            if sum == nil {
                sum = podQuantity
            } else {
                sum.Add(*podQuantity)
            }
        }
    }
    // if list is empty
    if sum == nil {
        q := resource.MustParse("0")
        sum = &q
    }
    return sum
}

// PodRequests returns sum of each resource request across all containers in pod
func PodRequests(pod *api.Pod, resourceName api.ResourceName) (*resource.Quantity, error) {
    if !PodHasRequests(pod, resourceName) {
        return nil, fmt.Errorf("Each container in pod %s/%s does not have an explicit request for resource %s.", pod.Namespace, pod.Name, resourceName)
    }
    var sum *resource.Quantity
    for j := range pod.Spec.Containers {
        value, _ := pod.Spec.Containers[j].Resources.Requests[resourceName]
        if sum == nil {
            sum = value.Copy()
        } else {
            err := sum.Add(value)
            if err != nil {
                return sum, err
            }
        }
    }
    // if list is empty
    if sum == nil {
        q := resource.MustParse("0")
        sum = &q
    }
    return sum, nil
}

// PodHasRequests verifies that each container in the pod has an explicit request that is non-zero for a named resource
func PodHasRequests(pod *api.Pod, resourceName api.ResourceName) bool {
    for j := range pod.Spec.Containers {
        value, valueSet := pod.Spec.Containers[j].Resources.Requests[resourceName]
        if !valueSet || value.Value() == int64(0) {
            return false
        }
    }
    return true
}

// PodCPU computes total cpu limit across all containers in pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodCPU(pod *api.Pod) *resource.Quantity {
    val := int64(0)
    for j := range pod.Spec.Containers {
@@ -233,29 +292,8 @@ func PodCPU(pod *api.Pod) *resource.Quantity {
    return resource.NewMilliQuantity(int64(val), resource.DecimalSI)
}

// IsPodCPUUnbounded returns true if the cpu use is unbounded for any container in pod
func IsPodCPUUnbounded(pod *api.Pod) bool {
    for j := range pod.Spec.Containers {
        container := pod.Spec.Containers[j]
        if container.Resources.Limits.Cpu().MilliValue() == int64(0) {
            return true
        }
    }
    return false
}

// IsPodMemoryUnbounded returns true if the memory use is unbounded for any container in pod
func IsPodMemoryUnbounded(pod *api.Pod) bool {
    for j := range pod.Spec.Containers {
        container := pod.Spec.Containers[j]
        if container.Resources.Limits.Memory().Value() == int64(0) {
            return true
        }
    }
    return false
}

// PodMemory computes the memory usage of a pod
// PodMemory computes total memory limit across all containers in a pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodMemory(pod *api.Pod) *resource.Quantity {
    val := int64(0)
    for j := range pod.Spec.Containers {
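The summation above rides on resource.Quantity arithmetic: PodRequests copies the first container's quantity and folds the rest in with Add, which in this release still returns an error (hence the err check in the code above). As a small aside, not part of the commit, the snippet below shows how those sums canonicalize, which is also where the string expectations in the test file below come from: two 100m CPU requests print as "200m", and 500Mi plus 1Gi of memory prints as "1524Mi". It assumes the k8s.io/kubernetes/pkg/api/resource import path of this era.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    // Two containers each requesting 100m of CPU sum to 200m.
    cpu := resource.MustParse("100m")
    cpuSum := cpu.Copy()
    if err := cpuSum.Add(cpu); err != nil {
        fmt.Println("add failed:", err)
        return
    }
    fmt.Println(cpuSum.String()) // 200m

    // 500Mi + 1Gi is reported with a binary suffix: 1524Mi.
    mem := resource.MustParse("500Mi")
    memSum := mem.Copy()
    if err := memSum.Add(resource.MustParse("1Gi")); err != nil {
        fmt.Println("add failed:", err)
        return
    }
    fmt.Println(memSum.String()) // 1524Mi
}

The second file in the diff is the controller's test file, updated to build pods with explicit requests.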
@@ -17,6 +17,7 @@ limitations under the License.
package resourcequotacontroller

import (
    "strconv"
    "testing"

    "k8s.io/kubernetes/pkg/api"
@@ -25,19 +26,39 @@ import (
    "k8s.io/kubernetes/pkg/util"
)

func getResourceRequirements(cpu, memory string) api.ResourceRequirements {
    res := api.ResourceRequirements{}
    res.Limits = api.ResourceList{}
func getResourceList(cpu, memory string) api.ResourceList {
    res := api.ResourceList{}
    if cpu != "" {
        res.Limits[api.ResourceCPU] = resource.MustParse(cpu)
        res[api.ResourceCPU] = resource.MustParse(cpu)
    }
    if memory != "" {
        res.Limits[api.ResourceMemory] = resource.MustParse(memory)
        res[api.ResourceMemory] = resource.MustParse(memory)
    }

    return res
}

func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
    res := api.ResourceRequirements{}
    res.Requests = requests
    res.Limits = limits
    return res
}

func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod {
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
        Spec: api.PodSpec{},
    }
    pod.Spec.Containers = make([]api.Container, 0, numContainers)
    for i := 0; i < numContainers; i++ {
        pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
            Image: "foo:V" + strconv.Itoa(i),
            Resources: resources,
        })
    }
    return pod
}
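As a quick illustration of how these helpers compose with the new functions in the controller, the hypothetical test below (not part of this commit, and assumed to live in the same package) builds the requests-only fixture used throughout the table-driven tests and checks that PodRequests sums the two 100m container requests to 200m.

func TestPodRequestsExample(t *testing.T) {
    // Two containers, each requesting 100m CPU; limits are left unset.
    pod := validPod("request-cpu", 2,
        getResourceRequirements(getResourceList("100m", ""), getResourceList("", "")))

    got, err := PodRequests(pod, api.ResourceCPU)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if got.String() != "200m" {
        t.Errorf("expected 200m, got %s", got.String())
    }
}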
func TestFilterQuotaPods(t *testing.T) {
    pods := []api.Pod{
        {
@@ -105,7 +126,7 @@ func TestSyncResourceQuota(t *testing.T) {
            Status: api.PodStatus{Phase: api.PodRunning},
            Spec: api.PodSpec{
                Volumes: []api.Volume{{Name: "vol"}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
            },
        },
        {
@@ -113,7 +134,7 @@ func TestSyncResourceQuota(t *testing.T) {
            Status: api.PodStatus{Phase: api.PodRunning},
            Spec: api.PodSpec{
                Volumes: []api.Volume{{Name: "vol"}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
            },
        },
        {
@@ -121,7 +142,7 @@ func TestSyncResourceQuota(t *testing.T) {
            Status: api.PodStatus{Phase: api.PodFailed},
            Spec: api.PodSpec{
                Volumes: []api.Volume{{Name: "vol"}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
                Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
            },
        },
    },
@@ -144,7 +165,7 @@ func TestSyncResourceQuota(t *testing.T) {
        },
        Used: api.ResourceList{
            api.ResourceCPU: resource.MustParse("200m"),
            api.ResourceMemory: resource.MustParse("2147483648"),
            api.ResourceMemory: resource.MustParse("2Gi"),
            api.ResourcePods: resource.MustParse("2"),
        },
    },
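A note on the expectation change just above: "2147483648" and "2Gi" denote the same number of bytes; the switch to the binary-suffix form likely tracks the new requests-based summation, which adds up quantities parsed from "1Gi" and therefore reports the total in that canonical form. The snippet below, an aside rather than part of the commit (and assuming the pkg/api/resource import path of this era), just confirms the two spellings are equal in value.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    a := resource.MustParse("2Gi")
    b := resource.MustParse("2147483648")
    // Both represent 2 * 1024 * 1024 * 1024 bytes; only the canonical string differs.
    fmt.Println(a.Value(), b.Value(), a.Value() == b.Value()) // 2147483648 2147483648 true
}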
@@ -177,7 +198,6 @@ func TestSyncResourceQuota(t *testing.T) {
            t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
        }
    }

}

func TestSyncResourceQuotaSpecChange(t *testing.T) {
@@ -269,62 +289,151 @@ func TestSyncResourceQuotaNoChange(t *testing.T) {
    }
}

func TestIsPodCPUUnbounded(t *testing.T) {
    pod := api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "pod-running"},
        Status: api.PodStatus{Phase: api.PodRunning},
        Spec: api.PodSpec{
            Volumes: []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "0")}},
func TestPodHasRequests(t *testing.T) {
    type testCase struct {
        pod *api.Pod
        resourceName api.ResourceName
        expectedResult bool
    }
    testCases := []testCase{
        {
            pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: true,
        },
        {
            pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: false,
        },
        {
            pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: false,
        },
        {
            pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "2Mi"), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: true,
        },
        {
            pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: false,
        },
        {
            pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: false,
        },
    }
    if IsPodCPUUnbounded(&pod) {
        t.Errorf("Expected false")
    }
    pod = api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "pod-running"},
        Status: api.PodStatus{Phase: api.PodRunning},
        Spec: api.PodSpec{
            Volumes: []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
        },
    }
    if !IsPodCPUUnbounded(&pod) {
        t.Errorf("Expected true")
    }

    pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
    if !IsPodCPUUnbounded(&pod) {
        t.Errorf("Expected true")
    for _, item := range testCases {
        if actual := PodHasRequests(item.pod, item.resourceName); item.expectedResult != actual {
            t.Errorf("Pod %s for resource %s expected %v actual %v", item.pod.Name, item.resourceName, item.expectedResult, actual)
        }
    }
}

func TestIsPodMemoryUnbounded(t *testing.T) {
    pod := api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "pod-running"},
        Status: api.PodStatus{Phase: api.PodRunning},
        Spec: api.PodSpec{
            Volumes: []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "1Gi")}},
func TestPodRequests(t *testing.T) {
    type testCase struct {
        pod *api.Pod
        resourceName api.ResourceName
        expectedResult string
        expectedError bool
    }
    testCases := []testCase{
        {
            pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: "200m",
            expectedError: false,
        },
        {
            pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: "",
            expectedError: true,
        },
        {
            pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
            resourceName: api.ResourceCPU,
            expectedResult: "",
            expectedError: true,
        },
        {
            pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: "1000Mi",
            expectedError: false,
        },
        {
            pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: "",
            expectedError: true,
        },
        {
            pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))),
            resourceName: api.ResourceMemory,
            expectedResult: "",
            expectedError: true,
        },
    }
    if IsPodMemoryUnbounded(&pod) {
        t.Errorf("Expected false")
    }
    pod = api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "pod-running"},
        Status: api.PodStatus{Phase: api.PodRunning},
        Spec: api.PodSpec{
            Volumes: []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
        },
    }
    if !IsPodMemoryUnbounded(&pod) {
        t.Errorf("Expected true")
    }

    pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
    if !IsPodMemoryUnbounded(&pod) {
        t.Errorf("Expected true")
    for _, item := range testCases {
        actual, err := PodRequests(item.pod, item.resourceName)
        if item.expectedError != (err != nil) {
            t.Errorf("Unexpected error result for pod %s for resource %s expected error %v got %v", item.pod.Name, item.resourceName, item.expectedError, err)
        }
        if item.expectedResult != "" && (item.expectedResult != actual.String()) {
            t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pod.Name, item.resourceName)
        }
    }
}

func TestPodsRequests(t *testing.T) {
    type testCase struct {
        pods []*api.Pod
        resourceName api.ResourceName
        expectedResult string
    }
    testCases := []testCase{
        {
            pods: []*api.Pod{
                validPod("request-cpu-1", 1, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))),
                validPod("request-cpu-2", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))),
            },
            resourceName: api.ResourceCPU,
            expectedResult: "1100m",
        },
        {
            pods: []*api.Pod{
                validPod("no-request-cpu-1", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
                validPod("no-request-cpu-2", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))),
            },
            resourceName: api.ResourceCPU,
            expectedResult: "",
        },
        {
            pods: []*api.Pod{
                validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
                validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))),
            },
            resourceName: api.ResourceCPU,
            expectedResult: "",
        },
        {
            pods: []*api.Pod{
                validPod("request-memory-1", 1, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))),
                validPod("request-memory-2", 1, getResourceRequirements(getResourceList("", "1Gi"), getResourceList("", ""))),
            },
            resourceName: api.ResourceMemory,
            expectedResult: "1524Mi",
        },
    }
    for _, item := range testCases {
        actual := PodsRequests(item.pods, item.resourceName)
        if item.expectedResult != "" && (item.expectedResult != actual.String()) {
            t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pods[0].Name, item.resourceName)
        }
    }
}