mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-08-05 02:09:56 +00:00

Merge pull request #128718 from tallclair/no-cpu-limit

[FG:InPlacePodVerticalScaling] Enable resizing containers without limits

Commit f5d1fdf772

pkg/kubelet/cm/.mockery.yaml (new file, +11 lines)
@@ -0,0 +1,11 @@
---
dir: testing
filename: "mock_{{.InterfaceName | snakecase}}.go"
boilerplate-file: ../../../hack/boilerplate/boilerplate.generatego.txt
outpkg: testing
with-expecter: true
packages:
  k8s.io/kubernetes/pkg/kubelet/cm:
    interfaces:
      ContainerManager:
      PodContainerManager:
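In short, this configuration has mockery v2 write mocks for the ContainerManager and PodContainerManager interfaces into pkg/kubelet/cm/testing (output package name testing), name them mock_<interface_name>.go, stamp them with the Kubernetes boilerplate header, and, via with-expecter: true, generate the typed EXPECT() builder API used by the new tests below. The //go:generate mockery directive added in the next hunk means running go generate over pkg/kubelet/cm regenerates them.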
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+//go:generate mockery
 package cm
 
 import (
pkg/kubelet/cm/testing/mock_container_manager.go (new file, +1636 lines)
File diff suppressed because it is too large.

pkg/kubelet/cm/testing/mock_pod_container_manager.go (new file, +572 lines)
@@ -0,0 +1,572 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by mockery v2.40.3. DO NOT EDIT.

package testing

import (
	mock "github.com/stretchr/testify/mock"
	cm "k8s.io/kubernetes/pkg/kubelet/cm"

	types "k8s.io/apimachinery/pkg/types"

	v1 "k8s.io/api/core/v1"
)

// MockPodContainerManager is an autogenerated mock type for the PodContainerManager type
type MockPodContainerManager struct {
	mock.Mock
}

type MockPodContainerManager_Expecter struct {
	mock *mock.Mock
}

func (_m *MockPodContainerManager) EXPECT() *MockPodContainerManager_Expecter {
	return &MockPodContainerManager_Expecter{mock: &_m.Mock}
}

// Destroy provides a mock function with given fields: name
func (_m *MockPodContainerManager) Destroy(name cm.CgroupName) error {
	ret := _m.Called(name)

	if len(ret) == 0 {
		panic("no return value specified for Destroy")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(cm.CgroupName) error); ok {
		r0 = rf(name)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MockPodContainerManager_Destroy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Destroy'
type MockPodContainerManager_Destroy_Call struct {
	*mock.Call
}

// Destroy is a helper method to define mock.On call
//   - name cm.CgroupName
func (_e *MockPodContainerManager_Expecter) Destroy(name interface{}) *MockPodContainerManager_Destroy_Call {
	return &MockPodContainerManager_Destroy_Call{Call: _e.mock.On("Destroy", name)}
}

func (_c *MockPodContainerManager_Destroy_Call) Run(run func(name cm.CgroupName)) *MockPodContainerManager_Destroy_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(cm.CgroupName))
	})
	return _c
}

func (_c *MockPodContainerManager_Destroy_Call) Return(_a0 error) *MockPodContainerManager_Destroy_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockPodContainerManager_Destroy_Call) RunAndReturn(run func(cm.CgroupName) error) *MockPodContainerManager_Destroy_Call {
	_c.Call.Return(run)
	return _c
}

// EnsureExists provides a mock function with given fields: _a0
func (_m *MockPodContainerManager) EnsureExists(_a0 *v1.Pod) error {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for EnsureExists")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*v1.Pod) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MockPodContainerManager_EnsureExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnsureExists'
type MockPodContainerManager_EnsureExists_Call struct {
	*mock.Call
}

// EnsureExists is a helper method to define mock.On call
//   - _a0 *v1.Pod
func (_e *MockPodContainerManager_Expecter) EnsureExists(_a0 interface{}) *MockPodContainerManager_EnsureExists_Call {
	return &MockPodContainerManager_EnsureExists_Call{Call: _e.mock.On("EnsureExists", _a0)}
}

func (_c *MockPodContainerManager_EnsureExists_Call) Run(run func(_a0 *v1.Pod)) *MockPodContainerManager_EnsureExists_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod))
	})
	return _c
}

func (_c *MockPodContainerManager_EnsureExists_Call) Return(_a0 error) *MockPodContainerManager_EnsureExists_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockPodContainerManager_EnsureExists_Call) RunAndReturn(run func(*v1.Pod) error) *MockPodContainerManager_EnsureExists_Call {
	_c.Call.Return(run)
	return _c
}

// Exists provides a mock function with given fields: _a0
func (_m *MockPodContainerManager) Exists(_a0 *v1.Pod) bool {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for Exists")
	}

	var r0 bool
	if rf, ok := ret.Get(0).(func(*v1.Pod) bool); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// MockPodContainerManager_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists'
type MockPodContainerManager_Exists_Call struct {
	*mock.Call
}

// Exists is a helper method to define mock.On call
//   - _a0 *v1.Pod
func (_e *MockPodContainerManager_Expecter) Exists(_a0 interface{}) *MockPodContainerManager_Exists_Call {
	return &MockPodContainerManager_Exists_Call{Call: _e.mock.On("Exists", _a0)}
}

func (_c *MockPodContainerManager_Exists_Call) Run(run func(_a0 *v1.Pod)) *MockPodContainerManager_Exists_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod))
	})
	return _c
}

func (_c *MockPodContainerManager_Exists_Call) Return(_a0 bool) *MockPodContainerManager_Exists_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockPodContainerManager_Exists_Call) RunAndReturn(run func(*v1.Pod) bool) *MockPodContainerManager_Exists_Call {
	_c.Call.Return(run)
	return _c
}

// GetAllPodsFromCgroups provides a mock function with given fields:
func (_m *MockPodContainerManager) GetAllPodsFromCgroups() (map[types.UID]cm.CgroupName, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for GetAllPodsFromCgroups")
	}

	var r0 map[types.UID]cm.CgroupName
	var r1 error
	if rf, ok := ret.Get(0).(func() (map[types.UID]cm.CgroupName, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() map[types.UID]cm.CgroupName); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[types.UID]cm.CgroupName)
		}
	}

	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// MockPodContainerManager_GetAllPodsFromCgroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllPodsFromCgroups'
type MockPodContainerManager_GetAllPodsFromCgroups_Call struct {
	*mock.Call
}

// GetAllPodsFromCgroups is a helper method to define mock.On call
func (_e *MockPodContainerManager_Expecter) GetAllPodsFromCgroups() *MockPodContainerManager_GetAllPodsFromCgroups_Call {
	return &MockPodContainerManager_GetAllPodsFromCgroups_Call{Call: _e.mock.On("GetAllPodsFromCgroups")}
}

func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) Run(run func()) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) Return(_a0 map[types.UID]cm.CgroupName, _a1 error) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) RunAndReturn(run func() (map[types.UID]cm.CgroupName, error)) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
	_c.Call.Return(run)
	return _c
}

// GetPodCgroupConfig provides a mock function with given fields: pod, resource
func (_m *MockPodContainerManager) GetPodCgroupConfig(pod *v1.Pod, resource v1.ResourceName) (*cm.ResourceConfig, error) {
	ret := _m.Called(pod, resource)

	if len(ret) == 0 {
		panic("no return value specified for GetPodCgroupConfig")
	}

	var r0 *cm.ResourceConfig
	var r1 error
	if rf, ok := ret.Get(0).(func(*v1.Pod, v1.ResourceName) (*cm.ResourceConfig, error)); ok {
		return rf(pod, resource)
	}
	if rf, ok := ret.Get(0).(func(*v1.Pod, v1.ResourceName) *cm.ResourceConfig); ok {
		r0 = rf(pod, resource)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*cm.ResourceConfig)
		}
	}

	if rf, ok := ret.Get(1).(func(*v1.Pod, v1.ResourceName) error); ok {
		r1 = rf(pod, resource)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// MockPodContainerManager_GetPodCgroupConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodCgroupConfig'
type MockPodContainerManager_GetPodCgroupConfig_Call struct {
	*mock.Call
}

// GetPodCgroupConfig is a helper method to define mock.On call
//   - pod *v1.Pod
//   - resource v1.ResourceName
func (_e *MockPodContainerManager_Expecter) GetPodCgroupConfig(pod interface{}, resource interface{}) *MockPodContainerManager_GetPodCgroupConfig_Call {
	return &MockPodContainerManager_GetPodCgroupConfig_Call{Call: _e.mock.On("GetPodCgroupConfig", pod, resource)}
}

func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) Run(run func(pod *v1.Pod, resource v1.ResourceName)) *MockPodContainerManager_GetPodCgroupConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod), args[1].(v1.ResourceName))
	})
	return _c
}

func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) Return(_a0 *cm.ResourceConfig, _a1 error) *MockPodContainerManager_GetPodCgroupConfig_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) RunAndReturn(run func(*v1.Pod, v1.ResourceName) (*cm.ResourceConfig, error)) *MockPodContainerManager_GetPodCgroupConfig_Call {
	_c.Call.Return(run)
	return _c
}

// GetPodCgroupMemoryUsage provides a mock function with given fields: pod
func (_m *MockPodContainerManager) GetPodCgroupMemoryUsage(pod *v1.Pod) (uint64, error) {
	ret := _m.Called(pod)

	if len(ret) == 0 {
		panic("no return value specified for GetPodCgroupMemoryUsage")
	}

	var r0 uint64
	var r1 error
	if rf, ok := ret.Get(0).(func(*v1.Pod) (uint64, error)); ok {
		return rf(pod)
	}
	if rf, ok := ret.Get(0).(func(*v1.Pod) uint64); ok {
		r0 = rf(pod)
	} else {
		r0 = ret.Get(0).(uint64)
	}

	if rf, ok := ret.Get(1).(func(*v1.Pod) error); ok {
		r1 = rf(pod)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// MockPodContainerManager_GetPodCgroupMemoryUsage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodCgroupMemoryUsage'
type MockPodContainerManager_GetPodCgroupMemoryUsage_Call struct {
	*mock.Call
}

// GetPodCgroupMemoryUsage is a helper method to define mock.On call
//   - pod *v1.Pod
func (_e *MockPodContainerManager_Expecter) GetPodCgroupMemoryUsage(pod interface{}) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
	return &MockPodContainerManager_GetPodCgroupMemoryUsage_Call{Call: _e.mock.On("GetPodCgroupMemoryUsage", pod)}
}

func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) Run(run func(pod *v1.Pod)) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod))
	})
	return _c
}

func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) Return(_a0 uint64, _a1 error) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) RunAndReturn(run func(*v1.Pod) (uint64, error)) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
	_c.Call.Return(run)
	return _c
}

// GetPodContainerName provides a mock function with given fields: _a0
func (_m *MockPodContainerManager) GetPodContainerName(_a0 *v1.Pod) (cm.CgroupName, string) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetPodContainerName")
	}

	var r0 cm.CgroupName
	var r1 string
	if rf, ok := ret.Get(0).(func(*v1.Pod) (cm.CgroupName, string)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(*v1.Pod) cm.CgroupName); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(cm.CgroupName)
		}
	}

	if rf, ok := ret.Get(1).(func(*v1.Pod) string); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Get(1).(string)
	}

	return r0, r1
}

// MockPodContainerManager_GetPodContainerName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodContainerName'
type MockPodContainerManager_GetPodContainerName_Call struct {
	*mock.Call
}

// GetPodContainerName is a helper method to define mock.On call
//   - _a0 *v1.Pod
func (_e *MockPodContainerManager_Expecter) GetPodContainerName(_a0 interface{}) *MockPodContainerManager_GetPodContainerName_Call {
	return &MockPodContainerManager_GetPodContainerName_Call{Call: _e.mock.On("GetPodContainerName", _a0)}
}

func (_c *MockPodContainerManager_GetPodContainerName_Call) Run(run func(_a0 *v1.Pod)) *MockPodContainerManager_GetPodContainerName_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod))
	})
	return _c
}

func (_c *MockPodContainerManager_GetPodContainerName_Call) Return(_a0 cm.CgroupName, _a1 string) *MockPodContainerManager_GetPodContainerName_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

func (_c *MockPodContainerManager_GetPodContainerName_Call) RunAndReturn(run func(*v1.Pod) (cm.CgroupName, string)) *MockPodContainerManager_GetPodContainerName_Call {
	_c.Call.Return(run)
	return _c
}

// IsPodCgroup provides a mock function with given fields: cgroupfs
func (_m *MockPodContainerManager) IsPodCgroup(cgroupfs string) (bool, types.UID) {
	ret := _m.Called(cgroupfs)

	if len(ret) == 0 {
		panic("no return value specified for IsPodCgroup")
	}

	var r0 bool
	var r1 types.UID
	if rf, ok := ret.Get(0).(func(string) (bool, types.UID)); ok {
		return rf(cgroupfs)
	}
	if rf, ok := ret.Get(0).(func(string) bool); ok {
		r0 = rf(cgroupfs)
	} else {
		r0 = ret.Get(0).(bool)
	}

	if rf, ok := ret.Get(1).(func(string) types.UID); ok {
		r1 = rf(cgroupfs)
	} else {
		r1 = ret.Get(1).(types.UID)
	}

	return r0, r1
}

// MockPodContainerManager_IsPodCgroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsPodCgroup'
type MockPodContainerManager_IsPodCgroup_Call struct {
	*mock.Call
}

// IsPodCgroup is a helper method to define mock.On call
//   - cgroupfs string
func (_e *MockPodContainerManager_Expecter) IsPodCgroup(cgroupfs interface{}) *MockPodContainerManager_IsPodCgroup_Call {
	return &MockPodContainerManager_IsPodCgroup_Call{Call: _e.mock.On("IsPodCgroup", cgroupfs)}
}

func (_c *MockPodContainerManager_IsPodCgroup_Call) Run(run func(cgroupfs string)) *MockPodContainerManager_IsPodCgroup_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(string))
	})
	return _c
}

func (_c *MockPodContainerManager_IsPodCgroup_Call) Return(_a0 bool, _a1 types.UID) *MockPodContainerManager_IsPodCgroup_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

func (_c *MockPodContainerManager_IsPodCgroup_Call) RunAndReturn(run func(string) (bool, types.UID)) *MockPodContainerManager_IsPodCgroup_Call {
	_c.Call.Return(run)
	return _c
}

// ReduceCPULimits provides a mock function with given fields: name
func (_m *MockPodContainerManager) ReduceCPULimits(name cm.CgroupName) error {
	ret := _m.Called(name)

	if len(ret) == 0 {
		panic("no return value specified for ReduceCPULimits")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(cm.CgroupName) error); ok {
		r0 = rf(name)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MockPodContainerManager_ReduceCPULimits_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReduceCPULimits'
type MockPodContainerManager_ReduceCPULimits_Call struct {
	*mock.Call
}

// ReduceCPULimits is a helper method to define mock.On call
//   - name cm.CgroupName
func (_e *MockPodContainerManager_Expecter) ReduceCPULimits(name interface{}) *MockPodContainerManager_ReduceCPULimits_Call {
	return &MockPodContainerManager_ReduceCPULimits_Call{Call: _e.mock.On("ReduceCPULimits", name)}
}

func (_c *MockPodContainerManager_ReduceCPULimits_Call) Run(run func(name cm.CgroupName)) *MockPodContainerManager_ReduceCPULimits_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(cm.CgroupName))
	})
	return _c
}

func (_c *MockPodContainerManager_ReduceCPULimits_Call) Return(_a0 error) *MockPodContainerManager_ReduceCPULimits_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockPodContainerManager_ReduceCPULimits_Call) RunAndReturn(run func(cm.CgroupName) error) *MockPodContainerManager_ReduceCPULimits_Call {
	_c.Call.Return(run)
	return _c
}

// SetPodCgroupConfig provides a mock function with given fields: pod, resourceConfig
func (_m *MockPodContainerManager) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *cm.ResourceConfig) error {
	ret := _m.Called(pod, resourceConfig)

	if len(ret) == 0 {
		panic("no return value specified for SetPodCgroupConfig")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*v1.Pod, *cm.ResourceConfig) error); ok {
		r0 = rf(pod, resourceConfig)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MockPodContainerManager_SetPodCgroupConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPodCgroupConfig'
type MockPodContainerManager_SetPodCgroupConfig_Call struct {
	*mock.Call
}

// SetPodCgroupConfig is a helper method to define mock.On call
//   - pod *v1.Pod
//   - resourceConfig *cm.ResourceConfig
func (_e *MockPodContainerManager_Expecter) SetPodCgroupConfig(pod interface{}, resourceConfig interface{}) *MockPodContainerManager_SetPodCgroupConfig_Call {
	return &MockPodContainerManager_SetPodCgroupConfig_Call{Call: _e.mock.On("SetPodCgroupConfig", pod, resourceConfig)}
}

func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) Run(run func(pod *v1.Pod, resourceConfig *cm.ResourceConfig)) *MockPodContainerManager_SetPodCgroupConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*v1.Pod), args[1].(*cm.ResourceConfig))
	})
	return _c
}

func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) Return(_a0 error) *MockPodContainerManager_SetPodCgroupConfig_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) RunAndReturn(run func(*v1.Pod, *cm.ResourceConfig) error) *MockPodContainerManager_SetPodCgroupConfig_Call {
	_c.Call.Return(run)
	return _c
}

// NewMockPodContainerManager creates a new instance of MockPodContainerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockPodContainerManager(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockPodContainerManager {
	mock := &MockPodContainerManager{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
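As an aside, a minimal sketch of how this generated expecter-style mock is consumed in a test (the test name and cgroup path are illustrative, not from this PR):

package cm_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/types"

	cmtesting "k8s.io/kubernetes/pkg/kubelet/cm/testing"
)

func TestExpecterSketch(t *testing.T) {
	// The constructor registers a cleanup hook that asserts all declared
	// expectations when the test finishes.
	pcm := cmtesting.NewMockPodContainerManager(t)

	// EXPECT() yields typed Run/Return helpers instead of stringly-typed
	// m.On("IsPodCgroup", ...) calls.
	pcm.EXPECT().IsPodCgroup(mock.Anything).Return(false, types.UID(""))

	isPod, uid := pcm.IsPodCgroup("/kubepods/burstable")
	require.False(t, isPod)
	require.Equal(t, types.UID(""), uid)
}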
@@ -1791,19 +1791,31 @@ func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontaine
 	// Only compare resizeable resources, and only compare resources that are explicitly configured.
 	if hasCPUReq {
-		// If both allocated & status CPU requests are at or below MinShares then they are considered equal.
-		if !cpuReq.Equal(*cs.Resources.CPURequest) &&
+		if cs.Resources.CPURequest == nil {
+			if !cpuReq.IsZero() {
+				return false
+			}
+		} else if !cpuReq.Equal(*cs.Resources.CPURequest) &&
 			(cpuReq.MilliValue() > cm.MinShares || cs.Resources.CPURequest.MilliValue() > cm.MinShares) {
+			// If both allocated & status CPU requests are at or below MinShares then they are considered equal.
 			return false
 		}
 	}
 	if hasCPULim {
-		if !cpuLim.Equal(*cs.Resources.CPULimit) {
+		if cs.Resources.CPULimit == nil {
+			if !cpuLim.IsZero() {
+				return false
+			}
+		} else if !cpuLim.Equal(*cs.Resources.CPULimit) {
 			return false
 		}
 	}
 	if hasMemLim {
-		if !memLim.Equal(*cs.Resources.MemoryLimit) {
+		if cs.Resources.MemoryLimit == nil {
+			if !memLim.IsZero() {
+				return false
+			}
+		} else if !memLim.Equal(*cs.Resources.MemoryLimit) {
 			return false
 		}
 	}
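The pattern in all three branches is the same: a nil status value only matches an allocated value that is itself unset (zero). A minimal sketch of that rule as a standalone helper (quantityMatches is hypothetical, not part of this PR):

package sketch

import "k8s.io/apimachinery/pkg/api/resource"

// quantityMatches distills the comparison above: a nil status quantity is
// treated as "not configured", which only matches an allocated quantity of
// zero.
func quantityMatches(allocated resource.Quantity, status *resource.Quantity) bool {
	if status == nil {
		return allocated.IsZero()
	}
	return allocated.Equal(*status)
}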
@ -6797,6 +6797,38 @@ func TestAllocatedResourcesMatchStatus(t *testing.T) {
|
||||
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
|
||||
},
|
||||
expectMatch: true,
|
||||
}, {
|
||||
name: "nil status resources: cpu request mismatch",
|
||||
allocatedResources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
},
|
||||
statusResources: &kubecontainer.ContainerResources{},
|
||||
expectMatch: false,
|
||||
}, {
|
||||
name: "nil status resources: cpu limit mismatch",
|
||||
allocatedResources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
},
|
||||
statusResources: &kubecontainer.ContainerResources{
|
||||
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
|
||||
},
|
||||
expectMatch: false,
|
||||
}, {
|
||||
name: "nil status resources: memory limit mismatch",
|
||||
allocatedResources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("100M"),
|
||||
},
|
||||
},
|
||||
statusResources: &kubecontainer.ContainerResources{},
|
||||
expectMatch: false,
|
||||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@@ -584,8 +584,10 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 		cpuRequest: container.Resources.Requests.Cpu().MilliValue(),
 	}
 
-	// Default current values to the desired values so that a resize isn't triggered for missing values.
-	currentResources := desiredResources
+	currentResources := containerResources{
+		// memoryRequest isn't set by the runtime, so default it to the desired.
+		memoryRequest: desiredResources.memoryRequest,
+	}
 	if kubeContainerStatus.Resources.MemoryLimit != nil {
 		currentResources.memoryLimit = kubeContainerStatus.Resources.MemoryLimit.Value()
 	}
@@ -666,7 +668,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 	return true
 }
 
-func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *kubecontainer.PodStatus, podContainerChanges podActions, result kubecontainer.PodSyncResult) {
+func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerChanges podActions, result *kubecontainer.PodSyncResult) {
 	pcm := m.containerManager.NewPodContainerManager()
 	//TODO(vinaykul,InPlacePodVerticalScaling): Figure out best way to get enforceMemoryQoS value (parameter #4 below) in platform-agnostic way
 	podResources := cm.ResourceConfigForPod(pod, m.cpuCFSQuota, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
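Note the signature change: result is now a *kubecontainer.PodSyncResult, so failures recorded via result.Fail(...) inside doPodResizeAction propagate to the caller; with the previous by-value parameter, Fail mutated a local copy. The unused podStatus parameter is also dropped.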
@@ -688,7 +690,14 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 			}
 			err = pcm.SetPodCgroupConfig(pod, podCPUResources)
 		case v1.ResourceMemory:
-			err = pcm.SetPodCgroupConfig(pod, podResources)
+			if !setLimitValue {
+				// Memory requests aren't written to cgroups.
+				return nil
+			}
+			podMemoryResources := &cm.ResourceConfig{
+				Memory: podResources.Memory,
+			}
+			err = pcm.SetPodCgroupConfig(pod, podMemoryResources)
 		}
 		if err != nil {
 			klog.ErrorS(err, "Failed to set cgroup config", "resource", rName, "pod", pod.Name)
@@ -732,27 +741,28 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 			return err
 		}
 	if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources {
-		if podResources.Memory == nil {
-			klog.ErrorS(nil, "podResources.Memory is nil", "pod", pod.Name)
-			result.Fail(fmt.Errorf("podResources.Memory is nil for pod %s", pod.Name))
-			return
-		}
 		currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
 		if err != nil {
 			klog.ErrorS(err, "GetPodCgroupConfig for memory failed", "pod", pod.Name)
 			result.Fail(err)
 			return
 		}
-		currentPodMemoryUsage, err := pcm.GetPodCgroupMemoryUsage(pod)
-		if err != nil {
-			klog.ErrorS(err, "GetPodCgroupMemoryUsage failed", "pod", pod.Name)
-			result.Fail(err)
-			return
-		}
-		if currentPodMemoryUsage >= uint64(*podResources.Memory) {
-			klog.ErrorS(nil, "Aborting attempt to set pod memory limit less than current memory usage", "pod", pod.Name)
-			result.Fail(fmt.Errorf("aborting attempt to set pod memory limit less than current memory usage for pod %s", pod.Name))
-			return
+		if podResources.Memory != nil {
+			currentPodMemoryUsage, err := pcm.GetPodCgroupMemoryUsage(pod)
+			if err != nil {
+				klog.ErrorS(err, "GetPodCgroupMemoryUsage failed", "pod", pod.Name)
+				result.Fail(err)
+				return
+			}
+			if currentPodMemoryUsage >= uint64(*podResources.Memory) {
+				klog.ErrorS(nil, "Aborting attempt to set pod memory limit less than current memory usage", "pod", pod.Name)
+				result.Fail(fmt.Errorf("aborting attempt to set pod memory limit less than current memory usage for pod %s", pod.Name))
+				return
+			}
+		} else {
+			// Default pod memory limit to the current memory limit if unset to prevent it from updating.
+			// TODO(#128675): This does not support removing limits.
+			podResources.Memory = currentPodMemoryConfig.Memory
 		}
 		if errResize := resizeContainers(v1.ResourceMemory, int64(*currentPodMemoryConfig.Memory), *podResources.Memory, 0, 0); errResize != nil {
 			result.Fail(errResize)
@@ -760,9 +770,10 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 		}
 	}
 	if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources {
-		if podResources.CPUQuota == nil || podResources.CPUShares == nil {
-			klog.ErrorS(nil, "podResources.CPUQuota or podResources.CPUShares is nil", "pod", pod.Name)
-			result.Fail(fmt.Errorf("podResources.CPUQuota or podResources.CPUShares is nil for pod %s", pod.Name))
+		if podResources.CPUShares == nil {
+			// This shouldn't happen: ResourceConfigForPod always returns a non-nil value for CPUShares.
+			klog.ErrorS(nil, "podResources.CPUShares is nil", "pod", pod.Name)
+			result.Fail(fmt.Errorf("podResources.CPUShares is nil for pod %s", pod.Name))
 			return
 		}
 		currentPodCpuConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
@@ -771,6 +782,13 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 			result.Fail(err)
 			return
 		}
+
+		// Default pod CPUQuota to the current CPUQuota if no limit is set to prevent the pod limit
+		// from updating.
+		// TODO(#128675): This does not support removing limits.
+		if podResources.CPUQuota == nil {
+			podResources.CPUQuota = currentPodCpuConfig.CPUQuota
+		}
 		if errResize := resizeContainers(v1.ResourceCPU, *currentPodCpuConfig.CPUQuota, *podResources.CPUQuota,
 			int64(*currentPodCpuConfig.CPUShares), int64(*podResources.CPUShares)); errResize != nil {
 			result.Fail(errResize)
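For orientation, the CPUShares/CPUQuota values handled here come from the cm package's milli-CPU conversions, the same exported helpers the new unit test below calls. A minimal sketch with example values, assuming Linux cgroup semantics:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm"
)

func main() {
	// A 500m CPU request maps to cgroup cpu.shares: 500 * 1024 / 1000 = 512.
	fmt.Println(cm.MilliCPUToShares(500)) // 512
	// A 500m CPU limit maps to CFS quota per scheduling period
	// (cm.QuotaPeriod is 100000us): 500 * 100000 / 1000 = 50000us.
	fmt.Println(cm.MilliCPUToQuota(500, cm.QuotaPeriod)) // 50000
}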
@@ -823,16 +841,21 @@ func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, res
 		}
 		switch resourceName {
 		case v1.ResourceMemory:
-			return status.Resources.MemoryLimit.Equal(*container.Resources.Limits.Memory())
+			actualLimit := nonNilQuantity(status.Resources.MemoryLimit)
+			return actualLimit.Equal(*container.Resources.Limits.Memory())
 		case v1.ResourceCPU:
-			if !status.Resources.CPULimit.Equal(*container.Resources.Limits.Cpu()) {
+			actualLimit := nonNilQuantity(status.Resources.CPULimit)
+			actualRequest := nonNilQuantity(status.Resources.CPURequest)
+			desiredLimit := container.Resources.Limits.Cpu()
+			desiredRequest := container.Resources.Requests.Cpu()
+			if !actualLimit.Equal(*desiredLimit) {
 				return false // limits don't match
-			} else if status.Resources.CPURequest.Equal(*container.Resources.Requests.Cpu()) {
+			} else if actualRequest.Equal(*desiredRequest) {
 				return true // requests & limits both match
 			}
 			// Consider requests equal if both are at or below MinShares.
-			return status.Resources.CPURequest.MilliValue() <= cm.MinShares &&
-				container.Resources.Requests.Cpu().MilliValue() <= cm.MinShares
+			return actualRequest.MilliValue() <= cm.MinShares &&
+				desiredRequest.MilliValue() <= cm.MinShares
 		default:
 			return true // Shouldn't happen.
 		}
@@ -854,6 +877,15 @@ func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, res
 	return nil
 }
 
+// nonNilQuantity returns a non-nil quantity. If the input is non-nil, it is returned. Otherwise a
+// pointer to the zero value is returned.
+func nonNilQuantity(q *resource.Quantity) *resource.Quantity {
+	if q != nil {
+		return q
+	}
+	return &resource.Quantity{}
+}
+
 // computePodActions checks whether the pod spec has changed and returns the changes if true.
 func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
 	klog.V(5).InfoS("Syncing Pod", "pod", klog.KObj(pod))
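A small self-contained illustration of why this helper matters (the helper body is copied from the hunk above; main is illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// nonNilQuantity mirrors the helper added above.
func nonNilQuantity(q *resource.Quantity) *resource.Quantity {
	if q != nil {
		return q
	}
	return &resource.Quantity{}
}

func main() {
	var missing *resource.Quantity // e.g. a CPU limit the runtime did not report
	// Comparing through the helper treats the missing value as zero instead of
	// dereferencing a nil pointer.
	fmt.Println(nonNilQuantity(missing).IsZero()) // true
	fmt.Println(nonNilQuantity(resource.NewMilliQuantity(250, resource.DecimalSI)).IsZero()) // false
}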
@@ -1351,7 +1383,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
 	// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
 	if IsInPlacePodVerticalScalingAllowed(pod) {
 		if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
-			m.doPodResizeAction(pod, podStatus, podContainerChanges, result)
+			m.doPodResizeAction(pod, podContainerChanges, &result)
 		}
 	}
 
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"path/filepath"
 	"reflect"
+	goruntime "runtime"
 	"sort"
 	"testing"
 	"time"
@@ -31,6 +32,7 @@
 
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	noopoteltrace "go.opentelemetry.io/otel/trace/noop"
 
@@ -47,6 +49,8 @@
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/kubelet/cm"
+	cmtesting "k8s.io/kubernetes/pkg/kubelet/cm/testing"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	imagetypes "k8s.io/kubernetes/pkg/kubelet/images"
@@ -2826,3 +2830,197 @@ func TestGetImageVolumes(t *testing.T) {
 		assert.Equal(t, tc.expectedImageVolumePulls, imageVolumePulls)
 	}
 }
+
+func TestDoPodResizeAction(t *testing.T) {
+	if goruntime.GOOS != "linux" {
+		t.Skip("unsupported OS")
+	}
+
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+	_, _, m, err := createTestRuntimeManager()
+	require.NoError(t, err)
+	m.cpuCFSQuota = true // Enforce CPU Limits
+
+	for _, tc := range []struct {
+		testName                  string
+		currentResources          containerResources
+		desiredResources          containerResources
+		updatedResources          []v1.ResourceName
+		otherContainersHaveLimits bool
+		expectedError             string
+		expectPodCgroupUpdates    int
+	}{
+		{
+			testName: "Increase cpu and memory requests and limits, with computed pod limits",
+			currentResources: containerResources{
+				cpuRequest: 100, cpuLimit: 100,
+				memoryRequest: 100, memoryLimit: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest: 200, cpuLimit: 200,
+				memoryRequest: 200, memoryLimit: 200,
+			},
+			otherContainersHaveLimits: true,
+			updatedResources:          []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
+			expectPodCgroupUpdates:    3, // cpu req, cpu lim, mem lim
+		},
+		{
+			testName: "Increase cpu and memory requests and limits, without computed pod limits",
+			currentResources: containerResources{
+				cpuRequest: 100, cpuLimit: 100,
+				memoryRequest: 100, memoryLimit: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest: 200, cpuLimit: 200,
+				memoryRequest: 200, memoryLimit: 200,
+			},
+			// If some containers don't have limits, pod level limits are not applied
+			otherContainersHaveLimits: false,
+			updatedResources:          []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
+			expectPodCgroupUpdates:    1, // cpu req
+		},
+		{
+			testName: "Increase cpu and memory requests only",
+			currentResources: containerResources{
+				cpuRequest: 100, cpuLimit: 200,
+				memoryRequest: 100, memoryLimit: 200,
+			},
+			desiredResources: containerResources{
+				cpuRequest: 150, cpuLimit: 200,
+				memoryRequest: 150, memoryLimit: 200,
+			},
+			updatedResources:       []v1.ResourceName{v1.ResourceCPU},
+			expectPodCgroupUpdates: 1, // cpu req
+		},
+		{
+			testName: "Resize memory request no limits",
+			currentResources: containerResources{
+				cpuRequest:    100,
+				memoryRequest: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest:    100,
+				memoryRequest: 200,
+			},
+			// Memory request resize doesn't generate an update action.
+			updatedResources: []v1.ResourceName{},
+		},
+		{
+			testName: "Resize cpu request no limits",
+			currentResources: containerResources{
+				cpuRequest:    100,
+				memoryRequest: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest:    200,
+				memoryRequest: 100,
+			},
+			updatedResources:       []v1.ResourceName{v1.ResourceCPU},
+			expectPodCgroupUpdates: 1, // cpu req
+		},
+		{
+			testName: "Add limits",
+			currentResources: containerResources{
+				cpuRequest:    100,
+				memoryRequest: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest: 100, cpuLimit: 100,
+				memoryRequest: 100, memoryLimit: 100,
+			},
+			updatedResources:       []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
+			expectPodCgroupUpdates: 0,
+		},
+		{
+			testName: "Add limits and pod limits",
+			currentResources: containerResources{
+				cpuRequest:    100,
+				memoryRequest: 100,
+			},
+			desiredResources: containerResources{
+				cpuRequest: 100, cpuLimit: 100,
+				memoryRequest: 100, memoryLimit: 100,
+			},
+			otherContainersHaveLimits: true,
+			updatedResources:          []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
+			expectPodCgroupUpdates:    2, // cpu lim, memory lim
+		},
+	} {
+		t.Run(tc.testName, func(t *testing.T) {
+			mockCM := cmtesting.NewMockContainerManager(t)
+			m.containerManager = mockCM
+			mockPCM := cmtesting.NewMockPodContainerManager(t)
+			mockCM.EXPECT().NewPodContainerManager().Return(mockPCM)
+
+			mockPCM.EXPECT().GetPodCgroupConfig(mock.Anything, v1.ResourceMemory).Return(&cm.ResourceConfig{
+				Memory: ptr.To(tc.currentResources.memoryLimit),
+			}, nil).Maybe()
+			mockPCM.EXPECT().GetPodCgroupMemoryUsage(mock.Anything).Return(0, nil).Maybe()
+			// Set up mock pod cgroup config
+			podCPURequest := tc.currentResources.cpuRequest
+			podCPULimit := tc.currentResources.cpuLimit
+			if tc.otherContainersHaveLimits {
+				podCPURequest += 200
+				podCPULimit += 200
+			}
+			mockPCM.EXPECT().GetPodCgroupConfig(mock.Anything, v1.ResourceCPU).Return(&cm.ResourceConfig{
+				CPUShares: ptr.To(cm.MilliCPUToShares(podCPURequest)),
+				CPUQuota:  ptr.To(cm.MilliCPUToQuota(podCPULimit, cm.QuotaPeriod)),
+			}, nil).Maybe()
+			if tc.expectPodCgroupUpdates > 0 {
+				mockPCM.EXPECT().SetPodCgroupConfig(mock.Anything, mock.Anything).Return(nil).Times(tc.expectPodCgroupUpdates)
+			}
+
+			pod, kps := makeBasePodAndStatus()
+			// pod spec and allocated resources are already updated as desired when doPodResizeAction() is called.
+			pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceCPU:    *resource.NewMilliQuantity(tc.desiredResources.cpuRequest, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryRequest, resource.DecimalSI),
+				},
+				Limits: v1.ResourceList{
+					v1.ResourceCPU:    *resource.NewMilliQuantity(tc.desiredResources.cpuLimit, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryLimit, resource.DecimalSI),
+				},
+			}
+			if tc.otherContainersHaveLimits {
+				resourceList := v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100M"),
+				}
+				resources := v1.ResourceRequirements{
+					Requests: resourceList,
+					Limits:   resourceList,
+				}
+				pod.Spec.Containers[1].Resources = resources
+				pod.Spec.Containers[2].Resources = resources
+			}
+
+			updateInfo := containerToUpdateInfo{
+				apiContainerIdx:           0,
+				kubeContainerID:           kps.ContainerStatuses[0].ID,
+				desiredContainerResources: tc.desiredResources,
+				currentContainerResources: &tc.currentResources,
+			}
+			containersToUpdate := make(map[v1.ResourceName][]containerToUpdateInfo)
+			for _, r := range tc.updatedResources {
+				containersToUpdate[r] = []containerToUpdateInfo{updateInfo}
+			}
+
+			syncResult := &kubecontainer.PodSyncResult{}
+			actions := podActions{
+				ContainersToUpdate: containersToUpdate,
+			}
+			m.doPodResizeAction(pod, actions, syncResult)
+
+			if tc.expectedError != "" {
+				require.Error(t, syncResult.Error())
+				require.EqualError(t, syncResult.Error(), tc.expectedError)
+			} else {
+				require.NoError(t, syncResult.Error())
+			}
+
+			mock.AssertExpectationsForObjects(t, mockPCM)
+		})
+	}
+}
@@ -566,6 +566,24 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 	},
+	{
+		name: "Burstable QoS pod, one container with cpu & memory requests - increase cpu request",
+		containers: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "200m", MemReq: "500Mi"},
+			},
+		},
+		patchString: `{"spec":{"containers":[
+			{"name":"c1", "resources":{"requests":{"cpu":"300m"}}}
+		]}}`,
+		expected: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "300m", MemReq: "500Mi"},
+			},
+		},
+	},
 	{
 		name: "Burstable QoS pod, one container with cpu requests - resize with equivalent request",
 		containers: []e2epod.ResizableContainerInfo{
@@ -776,6 +794,102 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 	},
+	{
+		name: "Burstable QoS pod, mixed containers - scale up cpu and memory",
+		containers: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{},
+			},
+		},
+		patchString: `{"spec":{"containers":[
+			{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"200Mi"},"limits":{"cpu":"200m","memory":"200Mi"}}}
+		]}}`,
+		expected: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "200Mi", MemLim: "200Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{},
+			},
+		},
+	},
+	{
+		name: "Burstable QoS pod, mixed containers - add requests",
+		containers: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{},
+			},
+		},
+		patchString: `{"spec":{"containers":[
+			{"name":"c2", "resources":{"requests":{"cpu":"100m","memory":"100Mi"}}}
+		]}}`,
+		expected: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", MemReq: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+		},
+	},
+	{
+		name: "Burstable QoS pod, mixed containers - add limits",
+		containers: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", MemReq: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+		},
+		patchString: `{"spec":{"containers":[
+			{"name":"c2", "resources":{"limits":{"cpu":"200m","memory":"200Mi"}}}
+		]}}`,
+		expected: []e2epod.ResizableContainerInfo{
+			{
+				Name:      "c1",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+			{
+				Name:      "c2",
+				Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
+				CPUPolicy: &noRestart,
+				MemPolicy: &noRestart,
+			},
+		},
+	},
 	{
 		name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource",
 		containers: []e2epod.ResizableContainerInfo{