Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)

Merge pull request #127525 from scott-grimes/patch-1

fix: pods meeting qualifications for static placement when cpu-manager-policy=static should not have CFS quota enforcement

Commit cd2959b798
@@ -841,6 +841,22 @@ const (
 	//
 	// Enables specifying resources at pod-level.
 	PodLevelResources featuregate.Feature = "PodLevelResources"
+
+	// owner: @ffromani
+	// beta: v1.33
+	//
+	// Disables CPU quota for containers which have exclusive CPUs allocated.
+	// Disables pod-level CPU quota for pods containing at least one container with exclusive CPUs allocated.
+	// Exclusive CPUs for a container (init, application, sidecar) are allocated when:
+	// (1) cpumanager policy is static,
+	// (2) the pod has QoS Guaranteed,
+	// (3) the container has an integer CPU request.
+	// The expected behavior is that CFS quota for containers with exclusive CPUs allocated is disabled.
+	// Because this fix changes a long-established (but incorrect) behavior, users observing
+	// any regressions can use the DisableCPUQuotaWithExclusiveCPUs feature gate (default on) to
+	// restore the old behavior. Please file an issue if you have to use this feature gate.
+	// The feature gate will be locked to true and removed two releases later (1.35) if no bugs are reported.
+	DisableCPUQuotaWithExclusiveCPUs featuregate.Feature = "DisableCPUQuotaWithExclusiveCPUs"
 )

 func init() {
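To make the qualification rules above concrete, here is a minimal sketch (illustrative only, not part of this diff) of a pod the static CPU manager would grant exclusive CPUs: requests equal limits, so the pod is Guaranteed QoS, and the CPU quantity is an integer.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// qualifyingPod returns a Guaranteed-QoS pod whose single container requests
// an integer number of CPUs. Under cpu-manager-policy=static this container
// is allocated exclusive CPUs, so with DisableCPUQuotaWithExclusiveCPUs
// enabled its CFS quota is left unset instead of being enforced.
func qualifyingPod() *v1.Pod {
	resources := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("2"), // integer CPU count: eligible for pinning
		v1.ResourceMemory: resource.MustParse("1Gi"),
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pinned",
				Image: "registry.k8s.io/pause:3.10",
				Resources: v1.ResourceRequirements{
					// Requests == limits for every resource => Guaranteed QoS.
					Requests: resources.DeepCopy(),
					Limits:   resources.DeepCopy(),
				},
			}},
		},
	}
}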
@@ -829,4 +829,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 	zpagesfeatures.ComponentStatusz: {
 		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
 	},
+	DisableCPUQuotaWithExclusiveCPUs: {
+		{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
+	},
 }
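Note: since the gate defaults to true from v1.33, the corrected behavior applies automatically. Operators who depend on the old throttling can opt out until the gate is locked, e.g. by setting --feature-gates=DisableCPUQuotaWithExclusiveCPUs=false (or the equivalent featureGates entry in the kubelet configuration file).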
@@ -31,6 +31,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apiserver/pkg/server/healthz"
 	internalapi "k8s.io/cri-api/pkg/apis"
+	"k8s.io/klog/v2"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
@@ -154,6 +155,13 @@ type ContainerManager interface {
 	// Updates returns a channel that receives an Update when the device changed its status.
 	Updates() <-chan resourceupdates.Update

+	// PodHasExclusiveCPUs returns true if the provided pod has containers with exclusive CPUs.
+	// This means that at least one sidecar container or one app container has exclusive CPUs allocated.
+	PodHasExclusiveCPUs(pod *v1.Pod) bool
+
+	// ContainerHasExclusiveCPUs returns true if the provided container in the pod has exclusive CPUs allocated.
+	ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool
+
 	// Implements the PodResources Provider API
 	podresources.CPUsProvider
 	podresources.DevicesProvider
@@ -161,6 +169,10 @@ type ContainerManager interface {
 	podresources.DynamicResourcesProvider
 }

+type cpuAllocationReader interface {
+	GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet
+}
+
 type NodeConfig struct {
 	NodeName           types.NodeName
 	RuntimeCgroupsName string
@@ -212,6 +224,30 @@ func int64Slice(in []int) []int64 {
 	return out
 }

+func podHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod) bool {
+	for _, container := range pod.Spec.InitContainers {
+		if containerHasExclusiveCPUs(cr, pod, &container) {
+			return true
+		}
+	}
+	for _, container := range pod.Spec.Containers {
+		if containerHasExclusiveCPUs(cr, pod, &container) {
+			return true
+		}
+	}
+	klog.V(4).InfoS("Pod contains no container with pinned cpus", "podName", pod.Name)
+	return false
+}
+
+func containerHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod, container *v1.Container) bool {
+	exclusiveCPUs := cr.GetExclusiveCPUs(string(pod.UID), container.Name)
+	if !exclusiveCPUs.IsEmpty() {
+		klog.V(4).InfoS("Container has pinned cpus", "podName", pod.Name, "containerName", container.Name)
+		return true
+	}
+	return false
+}
+
 // parsePercentage parses the percentage string to numeric value.
 func parsePercentage(v string) (int64, error) {
 	if !strings.HasSuffix(v, "%") {
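A sketch of how these helpers are exercised, assuming it sits in the same package as the helpers above. The fakeReader is hypothetical: in the kubelet the cpuAllocationReader is the cpuManager (wired up later in this diff), and cpuset is assumed to come from k8s.io/utils/cpuset.

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/cpuset"
)

// fakeReader is a hypothetical test double for cpuAllocationReader: it
// reports pinned CPUs only for the (podUID, containerName) pairs it knows.
type fakeReader struct {
	pinned map[string]cpuset.CPUSet // key: podUID + "/" + containerName
}

func (f *fakeReader) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
	return f.pinned[podUID+"/"+containerName] // zero value is an empty set
}

// example: a pod whose "pinned" container holds CPUs 2-3 makes
// podHasExclusiveCPUs return true; a pod with no pinned containers
// returns false and only logs at V(4).
func example(pod *v1.Pod) bool {
	r := &fakeReader{pinned: map[string]cpuset.CPUSet{
		string(pod.UID) + "/pinned": cpuset.New(2, 3),
	}}
	return podHasExclusiveCPUs(r, pod)
}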
@@ -366,6 +366,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 			// cpuCFSQuotaPeriod is in microseconds. NodeConfig.CPUCFSQuotaPeriod is time.Duration (measured in nano seconds).
 			// Convert (cm.CPUCFSQuotaPeriod) [nanoseconds] / time.Microsecond (1000) to get cpuCFSQuotaPeriod in microseconds.
 			cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
+			podContainerManager: cm,
 		}
 	}
 	return &podContainerManagerNoop{
@@ -373,6 +374,14 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 	}
 }

+func (cm *containerManagerImpl) PodHasExclusiveCPUs(pod *v1.Pod) bool {
+	return podHasExclusiveCPUs(cm.cpuManager, pod)
+}
+
+func (cm *containerManagerImpl) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
+	return containerHasExclusiveCPUs(cm.cpuManager, pod, container)
+}
+
 func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
 	return &internalContainerLifecycleImpl{cm.cpuManager, cm.memoryManager, cm.topologyManager}
 }
@@ -195,6 +195,14 @@ func (cm *containerManagerStub) Updates() <-chan resourceupdates.Update {
 	return nil
 }

+func (cm *containerManagerStub) PodHasExclusiveCPUs(pod *v1.Pod) bool {
+	return false
+}
+
+func (cm *containerManagerStub) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
+	return false
+}
+
 func NewStubContainerManager() ContainerManager {
 	return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
 }
@@ -369,3 +369,11 @@ func (cm *containerManagerImpl) UnprepareDynamicResources(ctx context.Context, p
 func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
 	return false
 }
+
+func (cm *containerManagerImpl) PodHasExclusiveCPUs(pod *v1.Pod) bool {
+	return podHasExclusiveCPUs(cm.cpuManager, pod)
+}
+
+func (cm *containerManagerImpl) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
+	return containerHasExclusiveCPUs(cm.cpuManager, pod, container)
+}
@@ -268,3 +268,11 @@ func (cm *FakeContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, stat
 func (cm *FakeContainerManager) Updates() <-chan resourceupdates.Update {
 	return nil
 }
+
+func (cm *FakeContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool {
+	return false
+}
+
+func (cm *FakeContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
+	return false
+}
@@ -55,6 +55,8 @@ type podContainerManagerImpl struct {
 	// cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per
 	// node for all containers in usec
 	cpuCFSQuotaPeriod uint64
+	// podContainerManager is the ContainerManager running on the machine
+	podContainerManager ContainerManager
 }

 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -73,6 +75,11 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 	// check if container already exist
 	alreadyExists := m.Exists(pod)
 	if !alreadyExists {
+		enforceCPULimits := m.enforceCPULimits
+		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.podContainerManager.PodHasExclusiveCPUs(pod) {
+			klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod))
+			enforceCPULimits = false
+		}
 		enforceMemoryQoS := false
 		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
 			libcontainercgroups.IsCgroup2UnifiedMode() {
@@ -82,7 +89,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 		podContainerName, _ := m.GetPodContainerName(pod)
 		containerConfig := &CgroupConfig{
 			Name: podContainerName,
-			ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS),
+			ResourceParameters: ResourceConfigForPod(pod, enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS),
 		}
 		if m.podPidsLimit > 0 {
 			containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
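For intuition on what is being skipped: when enforceCPULimits is true, ResourceConfigForPod derives the pod's CFS quota from its CPU limit and the configured period; with the gate active and exclusive CPUs present, the quota is never written, leaving the cgroup unthrottled. Below is a simplified sketch of the kubelet's millicores-to-quota conversion (the real helper also clamps to a minimum quota), for illustration only.

// milliCPUToQuota is a simplified sketch of the conversion from a CPU limit
// in millicores to a CFS quota in microseconds for a given period.
// Example: a 2-CPU limit (2000m) with the default 100ms period (100000us)
// yields a quota of 200000us; leaving the quota unset (-1 on cgroup v1,
// "max" on cgroup v2) disables throttling entirely.
func milliCPUToQuota(milliCPU, periodUs int64) int64 {
	if milliCPU == 0 {
		return 0 // no limit => no quota
	}
	return milliCPU * periodUs / 1000 // 1000 millicores per CPU
}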
@@ -28,8 +28,6 @@ import (

 	context "context"

-	corev1 "k8s.io/api/core/v1"
-
 	cri "k8s.io/cri-api/pkg/apis"

 	framework "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -40,13 +38,15 @@ import (

 	mock "github.com/stretchr/testify/mock"

+	podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
+
 	resourceupdates "k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"

 	status "k8s.io/kubernetes/pkg/kubelet/status"

 	types "k8s.io/apimachinery/pkg/types"

-	v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
+	v1 "k8s.io/api/core/v1"
 )

 // MockContainerManager is an autogenerated mock type for the ContainerManager type
@@ -62,6 +62,53 @@ func (_m *MockContainerManager) EXPECT() *MockContainerManager_Expecter {
 	return &MockContainerManager_Expecter{mock: &_m.Mock}
 }

+// ContainerHasExclusiveCPUs provides a mock function with given fields: pod, _a1
+func (_m *MockContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, _a1 *v1.Container) bool {
+	ret := _m.Called(pod, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ContainerHasExclusiveCPUs")
+	}
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func(*v1.Pod, *v1.Container) bool); ok {
+		r0 = rf(pod, _a1)
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// MockContainerManager_ContainerHasExclusiveCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerHasExclusiveCPUs'
+type MockContainerManager_ContainerHasExclusiveCPUs_Call struct {
+	*mock.Call
+}
+
+// ContainerHasExclusiveCPUs is a helper method to define mock.On call
+// - pod *v1.Pod
+// - _a1 *v1.Container
+func (_e *MockContainerManager_Expecter) ContainerHasExclusiveCPUs(pod interface{}, _a1 interface{}) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
+	return &MockContainerManager_ContainerHasExclusiveCPUs_Call{Call: _e.mock.On("ContainerHasExclusiveCPUs", pod, _a1)}
+}
+
+func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) Run(run func(pod *v1.Pod, _a1 *v1.Container)) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(*v1.Pod), args[1].(*v1.Container))
+	})
+	return _c
+}
+
+func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) Return(_a0 bool) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) RunAndReturn(run func(*v1.Pod, *v1.Container) bool) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetAllocatableCPUs provides a mock function with given fields:
 func (_m *MockContainerManager) GetAllocatableCPUs() []int64 {
 	ret := _m.Called()
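A hypothetical test usage of the generated expecter API above (NewMockContainerManager is the mockery-generated constructor and is assumed to exist in this package):

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

// TestContainerHasExclusiveCPUs_Mock shows the expecter-style stubbing the
// generated code enables: program a return value, then assert on it.
func TestContainerHasExclusiveCPUs_Mock(t *testing.T) {
	m := NewMockContainerManager(t) // assumed mockery-generated constructor
	pod := &v1.Pod{}
	c := &v1.Container{Name: "pinned"}

	m.EXPECT().ContainerHasExclusiveCPUs(pod, c).Return(true)

	if !m.ContainerHasExclusiveCPUs(pod, c) {
		t.Fatal("expected mocked ContainerHasExclusiveCPUs to return true")
	}
}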
@@ -110,19 +157,19 @@ func (_c *MockContainerManager_GetAllocatableCPUs_Call) RunAndReturn(run func() 
 }

 // GetAllocatableDevices provides a mock function with given fields:
-func (_m *MockContainerManager) GetAllocatableDevices() []*v1.ContainerDevices {
+func (_m *MockContainerManager) GetAllocatableDevices() []*podresourcesv1.ContainerDevices {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for GetAllocatableDevices")
 	}

-	var r0 []*v1.ContainerDevices
-	if rf, ok := ret.Get(0).(func() []*v1.ContainerDevices); ok {
+	var r0 []*podresourcesv1.ContainerDevices
+	if rf, ok := ret.Get(0).(func() []*podresourcesv1.ContainerDevices); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.ContainerDevices)
+			r0 = ret.Get(0).([]*podresourcesv1.ContainerDevices)
 		}
 	}

@@ -146,30 +193,30 @@ func (_c *MockContainerManager_GetAllocatableDevices_Call) Run(run func()) *Mock
 	return _c
 }

-func (_c *MockContainerManager_GetAllocatableDevices_Call) Return(_a0 []*v1.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
+func (_c *MockContainerManager_GetAllocatableDevices_Call) Return(_a0 []*podresourcesv1.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetAllocatableDevices_Call) RunAndReturn(run func() []*v1.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
+func (_c *MockContainerManager_GetAllocatableDevices_Call) RunAndReturn(run func() []*podresourcesv1.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // GetAllocatableMemory provides a mock function with given fields:
-func (_m *MockContainerManager) GetAllocatableMemory() []*v1.ContainerMemory {
+func (_m *MockContainerManager) GetAllocatableMemory() []*podresourcesv1.ContainerMemory {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for GetAllocatableMemory")
 	}

-	var r0 []*v1.ContainerMemory
-	if rf, ok := ret.Get(0).(func() []*v1.ContainerMemory); ok {
+	var r0 []*podresourcesv1.ContainerMemory
+	if rf, ok := ret.Get(0).(func() []*podresourcesv1.ContainerMemory); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.ContainerMemory)
+			r0 = ret.Get(0).([]*podresourcesv1.ContainerMemory)
 		}
 	}

@@ -193,12 +240,12 @@ func (_c *MockContainerManager_GetAllocatableMemory_Call) Run(run func()) *MockC
 	return _c
 }

-func (_c *MockContainerManager_GetAllocatableMemory_Call) Return(_a0 []*v1.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
+func (_c *MockContainerManager_GetAllocatableMemory_Call) Return(_a0 []*podresourcesv1.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetAllocatableMemory_Call) RunAndReturn(run func() []*v1.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
+func (_c *MockContainerManager_GetAllocatableMemory_Call) RunAndReturn(run func() []*podresourcesv1.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -300,19 +347,19 @@ func (_c *MockContainerManager_GetCPUs_Call) RunAndReturn(run func(string, strin
 }

 // GetCapacity provides a mock function with given fields: localStorageCapacityIsolation
-func (_m *MockContainerManager) GetCapacity(localStorageCapacityIsolation bool) corev1.ResourceList {
+func (_m *MockContainerManager) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
 	ret := _m.Called(localStorageCapacityIsolation)

 	if len(ret) == 0 {
 		panic("no return value specified for GetCapacity")
 	}

-	var r0 corev1.ResourceList
-	if rf, ok := ret.Get(0).(func(bool) corev1.ResourceList); ok {
+	var r0 v1.ResourceList
+	if rf, ok := ret.Get(0).(func(bool) v1.ResourceList); ok {
 		r0 = rf(localStorageCapacityIsolation)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(corev1.ResourceList)
+			r0 = ret.Get(0).(v1.ResourceList)
 		}
 	}

@@ -337,43 +384,43 @@ func (_c *MockContainerManager_GetCapacity_Call) Run(run func(localStorageCapaci
 	return _c
 }

-func (_c *MockContainerManager_GetCapacity_Call) Return(_a0 corev1.ResourceList) *MockContainerManager_GetCapacity_Call {
+func (_c *MockContainerManager_GetCapacity_Call) Return(_a0 v1.ResourceList) *MockContainerManager_GetCapacity_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetCapacity_Call) RunAndReturn(run func(bool) corev1.ResourceList) *MockContainerManager_GetCapacity_Call {
+func (_c *MockContainerManager_GetCapacity_Call) RunAndReturn(run func(bool) v1.ResourceList) *MockContainerManager_GetCapacity_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // GetDevicePluginResourceCapacity provides a mock function with given fields:
-func (_m *MockContainerManager) GetDevicePluginResourceCapacity() (corev1.ResourceList, corev1.ResourceList, []string) {
+func (_m *MockContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for GetDevicePluginResourceCapacity")
 	}

-	var r0 corev1.ResourceList
-	var r1 corev1.ResourceList
+	var r0 v1.ResourceList
+	var r1 v1.ResourceList
 	var r2 []string
-	if rf, ok := ret.Get(0).(func() (corev1.ResourceList, corev1.ResourceList, []string)); ok {
+	if rf, ok := ret.Get(0).(func() (v1.ResourceList, v1.ResourceList, []string)); ok {
 		return rf()
 	}
-	if rf, ok := ret.Get(0).(func() corev1.ResourceList); ok {
+	if rf, ok := ret.Get(0).(func() v1.ResourceList); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(corev1.ResourceList)
+			r0 = ret.Get(0).(v1.ResourceList)
 		}
 	}

-	if rf, ok := ret.Get(1).(func() corev1.ResourceList); ok {
+	if rf, ok := ret.Get(1).(func() v1.ResourceList); ok {
 		r1 = rf()
 	} else {
 		if ret.Get(1) != nil {
-			r1 = ret.Get(1).(corev1.ResourceList)
+			r1 = ret.Get(1).(v1.ResourceList)
 		}
 	}

@@ -405,30 +452,30 @@ func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) Run(run fun
 	return _c
 }

-func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) Return(_a0 corev1.ResourceList, _a1 corev1.ResourceList, _a2 []string) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
+func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) Return(_a0 v1.ResourceList, _a1 v1.ResourceList, _a2 []string) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
 	_c.Call.Return(_a0, _a1, _a2)
 	return _c
 }

-func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) RunAndReturn(run func() (corev1.ResourceList, corev1.ResourceList, []string)) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
+func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) RunAndReturn(run func() (v1.ResourceList, v1.ResourceList, []string)) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // GetDevices provides a mock function with given fields: podUID, containerName
-func (_m *MockContainerManager) GetDevices(podUID string, containerName string) []*v1.ContainerDevices {
+func (_m *MockContainerManager) GetDevices(podUID string, containerName string) []*podresourcesv1.ContainerDevices {
 	ret := _m.Called(podUID, containerName)

 	if len(ret) == 0 {
 		panic("no return value specified for GetDevices")
 	}

-	var r0 []*v1.ContainerDevices
-	if rf, ok := ret.Get(0).(func(string, string) []*v1.ContainerDevices); ok {
+	var r0 []*podresourcesv1.ContainerDevices
+	if rf, ok := ret.Get(0).(func(string, string) []*podresourcesv1.ContainerDevices); ok {
 		r0 = rf(podUID, containerName)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.ContainerDevices)
+			r0 = ret.Get(0).([]*podresourcesv1.ContainerDevices)
 		}
 	}

@@ -454,30 +501,30 @@ func (_c *MockContainerManager_GetDevices_Call) Run(run func(podUID string, cont
 	return _c
 }

-func (_c *MockContainerManager_GetDevices_Call) Return(_a0 []*v1.ContainerDevices) *MockContainerManager_GetDevices_Call {
+func (_c *MockContainerManager_GetDevices_Call) Return(_a0 []*podresourcesv1.ContainerDevices) *MockContainerManager_GetDevices_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetDevices_Call) RunAndReturn(run func(string, string) []*v1.ContainerDevices) *MockContainerManager_GetDevices_Call {
+func (_c *MockContainerManager_GetDevices_Call) RunAndReturn(run func(string, string) []*podresourcesv1.ContainerDevices) *MockContainerManager_GetDevices_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // GetDynamicResources provides a mock function with given fields: pod, _a1
-func (_m *MockContainerManager) GetDynamicResources(pod *corev1.Pod, _a1 *corev1.Container) []*v1.DynamicResource {
+func (_m *MockContainerManager) GetDynamicResources(pod *v1.Pod, _a1 *v1.Container) []*podresourcesv1.DynamicResource {
 	ret := _m.Called(pod, _a1)

 	if len(ret) == 0 {
 		panic("no return value specified for GetDynamicResources")
 	}

-	var r0 []*v1.DynamicResource
-	if rf, ok := ret.Get(0).(func(*corev1.Pod, *corev1.Container) []*v1.DynamicResource); ok {
+	var r0 []*podresourcesv1.DynamicResource
+	if rf, ok := ret.Get(0).(func(*v1.Pod, *v1.Container) []*podresourcesv1.DynamicResource); ok {
 		r0 = rf(pod, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.DynamicResource)
+			r0 = ret.Get(0).([]*podresourcesv1.DynamicResource)
 		}
 	}

@@ -490,25 +537,25 @@ type MockContainerManager_GetDynamicResources_Call struct {
 }

 // GetDynamicResources is a helper method to define mock.On call
-// - pod *corev1.Pod
-// - _a1 *corev1.Container
+// - pod *v1.Pod
+// - _a1 *v1.Container
 func (_e *MockContainerManager_Expecter) GetDynamicResources(pod interface{}, _a1 interface{}) *MockContainerManager_GetDynamicResources_Call {
 	return &MockContainerManager_GetDynamicResources_Call{Call: _e.mock.On("GetDynamicResources", pod, _a1)}
 }

-func (_c *MockContainerManager_GetDynamicResources_Call) Run(run func(pod *corev1.Pod, _a1 *corev1.Container)) *MockContainerManager_GetDynamicResources_Call {
+func (_c *MockContainerManager_GetDynamicResources_Call) Run(run func(pod *v1.Pod, _a1 *v1.Container)) *MockContainerManager_GetDynamicResources_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(*corev1.Pod), args[1].(*corev1.Container))
+		run(args[0].(*v1.Pod), args[1].(*v1.Container))
 	})
 	return _c
 }

-func (_c *MockContainerManager_GetDynamicResources_Call) Return(_a0 []*v1.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
+func (_c *MockContainerManager_GetDynamicResources_Call) Return(_a0 []*podresourcesv1.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetDynamicResources_Call) RunAndReturn(run func(*corev1.Pod, *corev1.Container) []*v1.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
+func (_c *MockContainerManager_GetDynamicResources_Call) RunAndReturn(run func(*v1.Pod, *v1.Container) []*podresourcesv1.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -561,19 +608,19 @@ func (_c *MockContainerManager_GetHealthCheckers_Call) RunAndReturn(run func() [
 }

 // GetMemory provides a mock function with given fields: podUID, containerName
-func (_m *MockContainerManager) GetMemory(podUID string, containerName string) []*v1.ContainerMemory {
+func (_m *MockContainerManager) GetMemory(podUID string, containerName string) []*podresourcesv1.ContainerMemory {
 	ret := _m.Called(podUID, containerName)

 	if len(ret) == 0 {
 		panic("no return value specified for GetMemory")
 	}

-	var r0 []*v1.ContainerMemory
-	if rf, ok := ret.Get(0).(func(string, string) []*v1.ContainerMemory); ok {
+	var r0 []*podresourcesv1.ContainerMemory
+	if rf, ok := ret.Get(0).(func(string, string) []*podresourcesv1.ContainerMemory); ok {
 		r0 = rf(podUID, containerName)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.ContainerMemory)
+			r0 = ret.Get(0).([]*podresourcesv1.ContainerMemory)
 		}
 	}

@@ -599,12 +646,12 @@ func (_c *MockContainerManager_GetMemory_Call) Run(run func(podUID string, conta
 	return _c
 }

-func (_c *MockContainerManager_GetMemory_Call) Return(_a0 []*v1.ContainerMemory) *MockContainerManager_GetMemory_Call {
+func (_c *MockContainerManager_GetMemory_Call) Return(_a0 []*podresourcesv1.ContainerMemory) *MockContainerManager_GetMemory_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetMemory_Call) RunAndReturn(run func(string, string) []*v1.ContainerMemory) *MockContainerManager_GetMemory_Call {
+func (_c *MockContainerManager_GetMemory_Call) RunAndReturn(run func(string, string) []*podresourcesv1.ContainerMemory) *MockContainerManager_GetMemory_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -657,19 +704,19 @@ func (_c *MockContainerManager_GetMountedSubsystems_Call) RunAndReturn(run func(
 }

 // GetNodeAllocatableAbsolute provides a mock function with given fields:
-func (_m *MockContainerManager) GetNodeAllocatableAbsolute() corev1.ResourceList {
+func (_m *MockContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for GetNodeAllocatableAbsolute")
 	}

-	var r0 corev1.ResourceList
-	if rf, ok := ret.Get(0).(func() corev1.ResourceList); ok {
+	var r0 v1.ResourceList
+	if rf, ok := ret.Get(0).(func() v1.ResourceList); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(corev1.ResourceList)
+			r0 = ret.Get(0).(v1.ResourceList)
 		}
 	}

@@ -693,30 +740,30 @@ func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) Run(run func()) 
 	return _c
 }

-func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) Return(_a0 corev1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
+func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) Return(_a0 v1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) RunAndReturn(run func() corev1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
+func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // GetNodeAllocatableReservation provides a mock function with given fields:
-func (_m *MockContainerManager) GetNodeAllocatableReservation() corev1.ResourceList {
+func (_m *MockContainerManager) GetNodeAllocatableReservation() v1.ResourceList {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for GetNodeAllocatableReservation")
 	}

-	var r0 corev1.ResourceList
-	if rf, ok := ret.Get(0).(func() corev1.ResourceList); ok {
+	var r0 v1.ResourceList
+	if rf, ok := ret.Get(0).(func() v1.ResourceList); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(corev1.ResourceList)
+			r0 = ret.Get(0).(v1.ResourceList)
 		}
 	}

@@ -740,12 +787,12 @@ func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) Run(run func(
 	return _c
 }

-func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) Return(_a0 corev1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
+func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) Return(_a0 v1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) RunAndReturn(run func() corev1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
+func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -933,7 +980,7 @@ func (_c *MockContainerManager_GetQOSContainersInfo_Call) RunAndReturn(run func(
 }

 // GetResources provides a mock function with given fields: ctx, pod, _a2
-func (_m *MockContainerManager) GetResources(ctx context.Context, pod *corev1.Pod, _a2 *corev1.Container) (*container.RunContainerOptions, error) {
+func (_m *MockContainerManager) GetResources(ctx context.Context, pod *v1.Pod, _a2 *v1.Container) (*container.RunContainerOptions, error) {
 	ret := _m.Called(ctx, pod, _a2)

 	if len(ret) == 0 {
@@ -942,10 +989,10 @@ func (_m *MockContainerManager) GetResources(ctx context.Context, pod *corev1.Po

 	var r0 *container.RunContainerOptions
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, *corev1.Pod, *corev1.Container) (*container.RunContainerOptions, error)); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *v1.Pod, *v1.Container) (*container.RunContainerOptions, error)); ok {
 		return rf(ctx, pod, _a2)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, *corev1.Pod, *corev1.Container) *container.RunContainerOptions); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *v1.Pod, *v1.Container) *container.RunContainerOptions); ok {
 		r0 = rf(ctx, pod, _a2)
 	} else {
 		if ret.Get(0) != nil {
@@ -953,7 +1000,7 @@ func (_m *MockContainerManager) GetResources(ctx context.Context, pod *corev1.Po
 		}
 	}

-	if rf, ok := ret.Get(1).(func(context.Context, *corev1.Pod, *corev1.Container) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, *v1.Pod, *v1.Container) error); ok {
 		r1 = rf(ctx, pod, _a2)
 	} else {
 		r1 = ret.Error(1)
@@ -969,15 +1016,15 @@ type MockContainerManager_GetResources_Call struct {

 // GetResources is a helper method to define mock.On call
 // - ctx context.Context
-// - pod *corev1.Pod
-// - _a2 *corev1.Container
+// - pod *v1.Pod
+// - _a2 *v1.Container
 func (_e *MockContainerManager_Expecter) GetResources(ctx interface{}, pod interface{}, _a2 interface{}) *MockContainerManager_GetResources_Call {
 	return &MockContainerManager_GetResources_Call{Call: _e.mock.On("GetResources", ctx, pod, _a2)}
 }

-func (_c *MockContainerManager_GetResources_Call) Run(run func(ctx context.Context, pod *corev1.Pod, _a2 *corev1.Container)) *MockContainerManager_GetResources_Call {
+func (_c *MockContainerManager_GetResources_Call) Run(run func(ctx context.Context, pod *v1.Pod, _a2 *v1.Container)) *MockContainerManager_GetResources_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(*corev1.Pod), args[2].(*corev1.Container))
+		run(args[0].(context.Context), args[1].(*v1.Pod), args[2].(*v1.Container))
 	})
 	return _c
 }
@@ -987,7 +1034,7 @@ func (_c *MockContainerManager_GetResources_Call) Return(_a0 *container.RunConta
 	return _c
 }

-func (_c *MockContainerManager_GetResources_Call) RunAndReturn(run func(context.Context, *corev1.Pod, *corev1.Container) (*container.RunContainerOptions, error)) *MockContainerManager_GetResources_Call {
+func (_c *MockContainerManager_GetResources_Call) RunAndReturn(run func(context.Context, *v1.Pod, *v1.Container) (*container.RunContainerOptions, error)) *MockContainerManager_GetResources_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -1086,6 +1133,52 @@ func (_c *MockContainerManager_NewPodContainerManager_Call) RunAndReturn(run fun
 	return _c
 }

+// PodHasExclusiveCPUs provides a mock function with given fields: pod
+func (_m *MockContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool {
+	ret := _m.Called(pod)
+
+	if len(ret) == 0 {
+		panic("no return value specified for PodHasExclusiveCPUs")
+	}
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func(*v1.Pod) bool); ok {
+		r0 = rf(pod)
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// MockContainerManager_PodHasExclusiveCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PodHasExclusiveCPUs'
+type MockContainerManager_PodHasExclusiveCPUs_Call struct {
+	*mock.Call
+}
+
+// PodHasExclusiveCPUs is a helper method to define mock.On call
+// - pod *v1.Pod
+func (_e *MockContainerManager_Expecter) PodHasExclusiveCPUs(pod interface{}) *MockContainerManager_PodHasExclusiveCPUs_Call {
+	return &MockContainerManager_PodHasExclusiveCPUs_Call{Call: _e.mock.On("PodHasExclusiveCPUs", pod)}
+}
+
+func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) Run(run func(pod *v1.Pod)) *MockContainerManager_PodHasExclusiveCPUs_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(*v1.Pod))
+	})
+	return _c
+}
+
+func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) Return(_a0 bool) *MockContainerManager_PodHasExclusiveCPUs_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) RunAndReturn(run func(*v1.Pod) bool) *MockContainerManager_PodHasExclusiveCPUs_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // PodMightNeedToUnprepareResources provides a mock function with given fields: UID
 func (_m *MockContainerManager) PodMightNeedToUnprepareResources(UID types.UID) bool {
 	ret := _m.Called(UID)
@@ -1133,7 +1226,7 @@ func (_c *MockContainerManager_PodMightNeedToUnprepareResources_Call) RunAndRetu
 }

 // PrepareDynamicResources provides a mock function with given fields: _a0, _a1
-func (_m *MockContainerManager) PrepareDynamicResources(_a0 context.Context, _a1 *corev1.Pod) error {
+func (_m *MockContainerManager) PrepareDynamicResources(_a0 context.Context, _a1 *v1.Pod) error {
 	ret := _m.Called(_a0, _a1)

 	if len(ret) == 0 {
@@ -1141,7 +1234,7 @@ func (_m *MockContainerManager) PrepareDynamicResources(_a0 context.Context, _a1
 	}

 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, *corev1.Pod) error); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *v1.Pod) error); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		r0 = ret.Error(0)
@@ -1157,14 +1250,14 @@ type MockContainerManager_PrepareDynamicResources_Call struct {

 // PrepareDynamicResources is a helper method to define mock.On call
 // - _a0 context.Context
-// - _a1 *corev1.Pod
+// - _a1 *v1.Pod
 func (_e *MockContainerManager_Expecter) PrepareDynamicResources(_a0 interface{}, _a1 interface{}) *MockContainerManager_PrepareDynamicResources_Call {
 	return &MockContainerManager_PrepareDynamicResources_Call{Call: _e.mock.On("PrepareDynamicResources", _a0, _a1)}
 }

-func (_c *MockContainerManager_PrepareDynamicResources_Call) Run(run func(_a0 context.Context, _a1 *corev1.Pod)) *MockContainerManager_PrepareDynamicResources_Call {
+func (_c *MockContainerManager_PrepareDynamicResources_Call) Run(run func(_a0 context.Context, _a1 *v1.Pod)) *MockContainerManager_PrepareDynamicResources_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(*corev1.Pod))
+		run(args[0].(context.Context), args[1].(*v1.Pod))
 	})
 	return _c
 }
@@ -1174,7 +1267,7 @@ func (_c *MockContainerManager_PrepareDynamicResources_Call) Return(_a0 error) *
 	return _c
 }

-func (_c *MockContainerManager_PrepareDynamicResources_Call) RunAndReturn(run func(context.Context, *corev1.Pod) error) *MockContainerManager_PrepareDynamicResources_Call {
+func (_c *MockContainerManager_PrepareDynamicResources_Call) RunAndReturn(run func(context.Context, *v1.Pod) error) *MockContainerManager_PrepareDynamicResources_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -1225,7 +1318,7 @@ func (_c *MockContainerManager_ShouldResetExtendedResourceCapacity_Call) RunAndR
 }

 // Start provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4, _a5, _a6, _a7
-func (_m *MockContainerManager) Start(_a0 context.Context, _a1 *corev1.Node, _a2 cm.ActivePodsFunc, _a3 cm.GetNodeFunc, _a4 config.SourcesReady, _a5 status.PodStatusProvider, _a6 cri.RuntimeService, _a7 bool) error {
+func (_m *MockContainerManager) Start(_a0 context.Context, _a1 *v1.Node, _a2 cm.ActivePodsFunc, _a3 cm.GetNodeFunc, _a4 config.SourcesReady, _a5 status.PodStatusProvider, _a6 cri.RuntimeService, _a7 bool) error {
 	ret := _m.Called(_a0, _a1, _a2, _a3, _a4, _a5, _a6, _a7)

 	if len(ret) == 0 {
@@ -1233,7 +1326,7 @@ func (_m *MockContainerManager) Start(_a0 context.Context, _a1 *corev1.Node, _a2
 	}

 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, *corev1.Node, cm.ActivePodsFunc, cm.GetNodeFunc, config.SourcesReady, status.PodStatusProvider, cri.RuntimeService, bool) error); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *v1.Node, cm.ActivePodsFunc, cm.GetNodeFunc, config.SourcesReady, status.PodStatusProvider, cri.RuntimeService, bool) error); ok {
 		r0 = rf(_a0, _a1, _a2, _a3, _a4, _a5, _a6, _a7)
 	} else {
 		r0 = ret.Error(0)
@@ -1249,7 +1342,7 @@ type MockContainerManager_Start_Call struct {

 // Start is a helper method to define mock.On call
 // - _a0 context.Context
-// - _a1 *corev1.Node
+// - _a1 *v1.Node
 // - _a2 cm.ActivePodsFunc
 // - _a3 cm.GetNodeFunc
 // - _a4 config.SourcesReady
@@ -1260,9 +1353,9 @@ func (_e *MockContainerManager_Expecter) Start(_a0 interface{}, _a1 interface{},
 	return &MockContainerManager_Start_Call{Call: _e.mock.On("Start", _a0, _a1, _a2, _a3, _a4, _a5, _a6, _a7)}
 }

-func (_c *MockContainerManager_Start_Call) Run(run func(_a0 context.Context, _a1 *corev1.Node, _a2 cm.ActivePodsFunc, _a3 cm.GetNodeFunc, _a4 config.SourcesReady, _a5 status.PodStatusProvider, _a6 cri.RuntimeService, _a7 bool)) *MockContainerManager_Start_Call {
+func (_c *MockContainerManager_Start_Call) Run(run func(_a0 context.Context, _a1 *v1.Node, _a2 cm.ActivePodsFunc, _a3 cm.GetNodeFunc, _a4 config.SourcesReady, _a5 status.PodStatusProvider, _a6 cri.RuntimeService, _a7 bool)) *MockContainerManager_Start_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(*corev1.Node), args[2].(cm.ActivePodsFunc), args[3].(cm.GetNodeFunc), args[4].(config.SourcesReady), args[5].(status.PodStatusProvider), args[6].(cri.RuntimeService), args[7].(bool))
+		run(args[0].(context.Context), args[1].(*v1.Node), args[2].(cm.ActivePodsFunc), args[3].(cm.GetNodeFunc), args[4].(config.SourcesReady), args[5].(status.PodStatusProvider), args[6].(cri.RuntimeService), args[7].(bool))
 	})
 	return _c
 }
@@ -1272,7 +1365,7 @@ func (_c *MockContainerManager_Start_Call) Return(_a0 error) *MockContainerManag
 	return _c
 }

-func (_c *MockContainerManager_Start_Call) RunAndReturn(run func(context.Context, *corev1.Node, cm.ActivePodsFunc, cm.GetNodeFunc, config.SourcesReady, status.PodStatusProvider, cri.RuntimeService, bool) error) *MockContainerManager_Start_Call {
+func (_c *MockContainerManager_Start_Call) RunAndReturn(run func(context.Context, *v1.Node, cm.ActivePodsFunc, cm.GetNodeFunc, config.SourcesReady, status.PodStatusProvider, cri.RuntimeService, bool) error) *MockContainerManager_Start_Call {
 	_c.Call.Return(run)
 	return _c
 }
@@ -1323,19 +1416,19 @@ func (_c *MockContainerManager_Status_Call) RunAndReturn(run func() cm.Status) *
 }

 // SystemCgroupsLimit provides a mock function with given fields:
-func (_m *MockContainerManager) SystemCgroupsLimit() corev1.ResourceList {
+func (_m *MockContainerManager) SystemCgroupsLimit() v1.ResourceList {
 	ret := _m.Called()

 	if len(ret) == 0 {
 		panic("no return value specified for SystemCgroupsLimit")
 	}

-	var r0 corev1.ResourceList
-	if rf, ok := ret.Get(0).(func() corev1.ResourceList); ok {
+	var r0 v1.ResourceList
+	if rf, ok := ret.Get(0).(func() v1.ResourceList); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(corev1.ResourceList)
+			r0 = ret.Get(0).(v1.ResourceList)
 		}
 	}

@@ -1359,18 +1452,18 @@ func (_c *MockContainerManager_SystemCgroupsLimit_Call) Run(run func()) *MockCon
 	return _c
 }

-func (_c *MockContainerManager_SystemCgroupsLimit_Call) Return(_a0 corev1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
+func (_c *MockContainerManager_SystemCgroupsLimit_Call) Return(_a0 v1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
 	_c.Call.Return(_a0)
 	return _c
 }

-func (_c *MockContainerManager_SystemCgroupsLimit_Call) RunAndReturn(run func() corev1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
+func (_c *MockContainerManager_SystemCgroupsLimit_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
 	_c.Call.Return(run)
 	return _c
 }

 // UnprepareDynamicResources provides a mock function with given fields: _a0, _a1
-func (_m *MockContainerManager) UnprepareDynamicResources(_a0 context.Context, _a1 *corev1.Pod) error {
+func (_m *MockContainerManager) UnprepareDynamicResources(_a0 context.Context, _a1 *v1.Pod) error {
 	ret := _m.Called(_a0, _a1)

 	if len(ret) == 0 {
@@ -1378,7 +1471,7 @@ func (_m *MockContainerManager) UnprepareDynamicResources(_a0 context.Context, _
 	}

 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, *corev1.Pod) error); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *v1.Pod) error); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		r0 = ret.Error(0)
@@ -1394,14 +1487,14 @@ type MockContainerManager_UnprepareDynamicResources_Call struct {

 // UnprepareDynamicResources is a helper method to define mock.On call
 // - _a0 context.Context
-// - _a1 *corev1.Pod
+// - _a1 *v1.Pod
 func (_e *MockContainerManager_Expecter) UnprepareDynamicResources(_a0 interface{}, _a1 interface{}) *MockContainerManager_UnprepareDynamicResources_Call {
 	return &MockContainerManager_UnprepareDynamicResources_Call{Call: _e.mock.On("UnprepareDynamicResources", _a0, _a1)}
 }

-func (_c *MockContainerManager_UnprepareDynamicResources_Call) Run(run func(_a0 context.Context, _a1 *corev1.Pod)) *MockContainerManager_UnprepareDynamicResources_Call {
+func (_c *MockContainerManager_UnprepareDynamicResources_Call) Run(run func(_a0 context.Context, _a1 *v1.Pod)) *MockContainerManager_UnprepareDynamicResources_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(*corev1.Pod))
+		run(args[0].(context.Context), args[1].(*v1.Pod))
 	})
 	return _c
 }
@ -1411,7 +1504,7 @@ func (_c *MockContainerManager_UnprepareDynamicResources_Call) Return(_a0 error)
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_c *MockContainerManager_UnprepareDynamicResources_Call) RunAndReturn(run func(context.Context, *corev1.Pod) error) *MockContainerManager_UnprepareDynamicResources_Call {
|
func (_c *MockContainerManager_UnprepareDynamicResources_Call) RunAndReturn(run func(context.Context, *v1.Pod) error) *MockContainerManager_UnprepareDynamicResources_Call {
|
||||||
_c.Call.Return(run)
|
_c.Call.Return(run)
|
||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
@ -1449,7 +1542,7 @@ func (_c *MockContainerManager_UpdateAllocatedDevices_Call) RunAndReturn(run fun
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateAllocatedResourcesStatus provides a mock function with given fields: pod, _a1
|
// UpdateAllocatedResourcesStatus provides a mock function with given fields: pod, _a1
|
||||||
func (_m *MockContainerManager) UpdateAllocatedResourcesStatus(pod *corev1.Pod, _a1 *corev1.PodStatus) {
|
func (_m *MockContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, _a1 *v1.PodStatus) {
|
||||||
_m.Called(pod, _a1)
|
_m.Called(pod, _a1)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1459,15 +1552,15 @@ type MockContainerManager_UpdateAllocatedResourcesStatus_Call struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateAllocatedResourcesStatus is a helper method to define mock.On call
|
// UpdateAllocatedResourcesStatus is a helper method to define mock.On call
|
||||||
// - pod *corev1.Pod
|
// - pod *v1.Pod
|
||||||
// - _a1 *corev1.PodStatus
|
// - _a1 *v1.PodStatus
|
||||||
func (_e *MockContainerManager_Expecter) UpdateAllocatedResourcesStatus(pod interface{}, _a1 interface{}) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
func (_e *MockContainerManager_Expecter) UpdateAllocatedResourcesStatus(pod interface{}, _a1 interface{}) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
||||||
return &MockContainerManager_UpdateAllocatedResourcesStatus_Call{Call: _e.mock.On("UpdateAllocatedResourcesStatus", pod, _a1)}
|
return &MockContainerManager_UpdateAllocatedResourcesStatus_Call{Call: _e.mock.On("UpdateAllocatedResourcesStatus", pod, _a1)}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) Run(run func(pod *corev1.Pod, _a1 *corev1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) Run(run func(pod *v1.Pod, _a1 *v1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
||||||
_c.Call.Run(func(args mock.Arguments) {
|
_c.Call.Run(func(args mock.Arguments) {
|
||||||
run(args[0].(*corev1.Pod), args[1].(*corev1.PodStatus))
|
run(args[0].(*v1.Pod), args[1].(*v1.PodStatus))
|
||||||
})
|
})
|
||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
@ -1477,7 +1570,7 @@ func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) Return() *Mo
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) RunAndReturn(run func(*corev1.Pod, *corev1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) RunAndReturn(run func(*v1.Pod, *v1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
|
||||||
_c.Call.Return(run)
|
_c.Call.Return(run)
|
||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
@@ -108,6 +108,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 		startupManager:      proberesults.NewManager(),
 		machineInfo:         machineInfo,
 		osInterface:         osInterface,
+		containerManager:    cm.NewFakeContainerManager(),
 		runtimeHelper:       runtimeHelper,
 		runtimeService:      runtimeService,
 		imageService:        imageService,
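The fake runtime manager needs a container manager now because generateLinuxContainerResources (next hunk) consults it for exclusive-CPU state. For illustration only, a stub with the shape the tests rely on might look like the sketch below; the real NewFakeContainerManager lives in the kubelet cm package, and the method set here is an assumption based on the interface shown earlier in this commit.

package main

import v1 "k8s.io/api/core/v1"

// fakeCM reports "no exclusive CPUs" for every pod and container, so code
// paths gated on DisableCPUQuotaWithExclusiveCPUs keep the legacy behavior
// in tests that do not care about CPU pinning.
type fakeCM struct{}

func (fakeCM) PodHasExclusiveCPUs(*v1.Pod) bool                      { return false }
func (fakeCM) ContainerHasExclusiveCPUs(*v1.Pod, *v1.Container) bool { return false }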
@@ -133,7 +133,12 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
 
 	memoryLimit := getMemoryLimit(pod, container)
 	cpuLimit := getCPULimit(pod, container)
-	lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit)
+
+	// If pod has exclusive cpu and the container in question has integer cpu requests
+	// the cfs quota will not be enforced
+	disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.ContainerHasExclusiveCPUs(pod, container)
+	klog.V(2).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
+	lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit, disableCPUQuota)
 
 	lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
 		int64(m.machineInfo.MemoryCapacity)))
@@ -244,7 +249,7 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
 }
 
 // calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests, limits
-func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.LinuxContainerResources {
+func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity, disableCPUQuota bool) *runtimeapi.LinuxContainerResources {
 	resources := runtimeapi.LinuxContainerResources{}
 	var cpuShares int64
 
@@ -276,6 +281,9 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
 		}
 		cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
 		resources.CpuQuota = cpuQuota
+		if disableCPUQuota {
+			resources.CpuQuota = int64(-1)
+		}
 		resources.CpuPeriod = cpuPeriod
 	}
 
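Worth spelling out what the -1 sentinel buys: the CRI quota/period pair lands in the container's cgroup, and on cgroup v2 a negative quota is surfaced as "max" in cpu.max, i.e. no CFS throttling. A self-contained sketch of that rendering (an illustration of the semantics described in the commit message, not kubelet code):

package main

import "fmt"

// cpuMaxLine formats a CRI CpuQuota/CpuPeriod pair the way cgroup v2's
// cpu.max file exposes it: "<quota> <period>", with a negative quota
// rendered as "max".
func cpuMaxLine(cpuQuota, cpuPeriod int64) string {
	if cpuQuota < 0 {
		return fmt.Sprintf("max %d", cpuPeriod)
	}
	return fmt.Sprintf("%d %d", cpuQuota, cpuPeriod)
}

func main() {
	fmt.Println(cpuMaxLine(100000, 100000)) // "100000 100000": 1 CPU, quota enforced
	fmt.Println(cpuMaxLine(-1, 100000))     // "max 100000": quota disabled for exclusive CPUs
}

The e2e tests added further down assert exactly these two strings.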
@@ -408,7 +408,7 @@ func TestCalculateLinuxResources(t *testing.T) {
 	for _, test := range tests {
 		setCgroupVersionDuringTest(test.cgroupVersion)
 		m.singleProcessOOMKill = ptr.To(test.singleProcessOOMKill)
-		linuxContainerResources := m.calculateLinuxResources(test.cpuReq, test.cpuLim, test.memLim)
+		linuxContainerResources := m.calculateLinuxResources(test.cpuReq, test.cpuLim, test.memLim, false)
 		assert.Equal(t, test.expected, linuxContainerResources)
 	}
 }
@@ -687,7 +687,12 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerChanges podActions, result *kubecontainer.PodSyncResult) {
 	pcm := m.containerManager.NewPodContainerManager()
 	//TODO(vinaykul,InPlacePodVerticalScaling): Figure out best way to get enforceMemoryQoS value (parameter #4 below) in platform-agnostic way
-	podResources := cm.ResourceConfigForPod(pod, m.cpuCFSQuota, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
+	enforceCPULimits := m.cpuCFSQuota
+	if utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod) {
+		enforceCPULimits = false
+		klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod))
+	}
+	podResources := cm.ResourceConfigForPod(pod, enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
 	if podResources == nil {
 		klog.ErrorS(nil, "Unable to get resource configuration", "pod", pod.Name)
 		result.Fail(fmt.Errorf("unable to get resource configuration processing resize for pod %s", pod.Name))
|
|||||||
} {
|
} {
|
||||||
t.Run(tc.testName, func(t *testing.T) {
|
t.Run(tc.testName, func(t *testing.T) {
|
||||||
mockCM := cmtesting.NewMockContainerManager(t)
|
mockCM := cmtesting.NewMockContainerManager(t)
|
||||||
|
mockCM.EXPECT().PodHasExclusiveCPUs(mock.Anything).Return(false).Maybe()
|
||||||
|
mockCM.EXPECT().ContainerHasExclusiveCPUs(mock.Anything, mock.Anything).Return(false).Maybe()
|
||||||
m.containerManager = mockCM
|
m.containerManager = mockCM
|
||||||
mockPCM := cmtesting.NewMockPodContainerManager(t)
|
mockPCM := cmtesting.NewMockPodContainerManager(t)
|
||||||
mockCM.EXPECT().NewPodContainerManager().Return(mockPCM)
|
mockCM.EXPECT().NewPodContainerManager().Return(mockPCM)
|
||||||
|
@@ -24,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/features"
 
 	resourcehelper "k8s.io/component-helpers/resource"
@@ -37,7 +38,7 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
 
 		// For overhead, we do not differentiate between requests and limits. Treat this overhead
 		// as "guaranteed", with requests == limits
-		resources = m.calculateLinuxResources(cpu, cpu, memory)
+		resources = m.calculateLinuxResources(cpu, cpu, memory, false)
 	}
 
 	return resources
@@ -55,7 +56,12 @@ func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runt
 	if _, cpuRequestExists := req[v1.ResourceCPU]; cpuRequestExists {
 		cpuRequest = req.Cpu()
 	}
-	return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory())
+
+	// If pod has exclusive cpu the sandbox will not have cfs quota enforced
+	disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod)
+	klog.V(2).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
+
+	return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory(), disableCPUQuota)
 }
 
 func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
@@ -70,6 +70,16 @@ func makeCPUManagerPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod {
 				},
 			},
 			Command: []string{"sh", "-c", cpusetCmd},
+			VolumeMounts: []v1.VolumeMount{
+				{
+					Name:      "sysfscgroup",
+					MountPath: "/sysfscgroup",
+				},
+				{
+					Name:      "podinfo",
+					MountPath: "/podinfo",
+				},
+			},
 		}
 		containers = append(containers, ctn)
 	}
@@ -81,6 +91,30 @@ func makeCPUManagerPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod {
 		Spec: v1.PodSpec{
 			RestartPolicy: v1.RestartPolicyNever,
 			Containers:    containers,
+			Volumes: []v1.Volume{
+				{
+					Name: "sysfscgroup",
+					VolumeSource: v1.VolumeSource{
+						HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+					},
+				},
+				{
+					Name: "podinfo",
+					VolumeSource: v1.VolumeSource{
+						DownwardAPI: &v1.DownwardAPIVolumeSource{
+							Items: []v1.DownwardAPIVolumeFile{
+								{
+									Path: "uid",
+									FieldRef: &v1.ObjectFieldSelector{
+										APIVersion: "v1",
+										FieldPath:  "metadata.uid",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
 		},
 	}
 }
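The downward API "uid" file exists so a container can find its own pod-level cgroup: with the systemd cgroup driver the pod's slice name embeds the pod UID with dashes mapped to underscores, which is exactly what the podCFSCheckCommand added later in this diff greps for under the mounted /sysfscgroup tree. A sketch of the mapping (the "kubepods-pod" prefix is an assumption for a Guaranteed pod; it varies by QoS class, and only the dash-to-underscore mapping is load-bearing for the test):

package main

import (
	"fmt"
	"strings"
)

// podSliceName builds the systemd-style slice name a Guaranteed pod's
// cgroup directory ends with.
func podSliceName(podUID string) string {
	return fmt.Sprintf("kubepods-pod%s.slice", strings.ReplaceAll(podUID, "-", "_"))
}

func main() {
	fmt.Println(podSliceName("0b4a91c7-8b55-4c4e-9a8e-1234567890ab"))
	// kubepods-pod0b4a91c7_8b55_4c4e_9a8e_1234567890ab.slice
}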
@@ -234,6 +268,7 @@ func getCoreSiblingList(cpuRes int64) string {
 type cpuManagerKubeletArguments struct {
 	policyName                       string
 	enableCPUManagerOptions          bool
+	disableCPUQuotaWithExclusiveCPUs bool
 	reservedSystemCPUs               cpuset.CPUSet
 	options                          map[string]string
 }
@@ -247,6 +282,7 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku
 	newCfg.FeatureGates["CPUManagerPolicyOptions"] = kubeletArguments.enableCPUManagerOptions
 	newCfg.FeatureGates["CPUManagerPolicyBetaOptions"] = kubeletArguments.enableCPUManagerOptions
 	newCfg.FeatureGates["CPUManagerPolicyAlphaOptions"] = kubeletArguments.enableCPUManagerOptions
+	newCfg.FeatureGates["DisableCPUQuotaWithExclusiveCPUs"] = kubeletArguments.disableCPUQuotaWithExclusiveCPUs
 
 	newCfg.CPUManagerPolicy = kubeletArguments.policyName
 	newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}
@@ -556,6 +592,178 @@ func runMultipleCPUContainersGuPod(ctx context.Context, f *framework.Framework)
 	waitForContainerRemoval(ctx, pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
 }
 
+func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
+	var err error
+	var ctnAttrs []ctnAttribute
+	var pod1, pod2, pod3 *v1.Pod
+	var cleanupPods []*v1.Pod
+	ginkgo.DeferCleanup(func() {
+		// waitForContainerRemoval takes "long" to complete; if we use the parent ctx we get a
+		// 'deadline expired' message and the cleanup aborts, which we don't want.
+		ctx2 := context.TODO()
+		ginkgo.By("by deleting the pods and waiting for container removal")
+		for _, cleanupPod := range cleanupPods {
+			framework.Logf("deleting pod: %s/%s", cleanupPod.Namespace, cleanupPod.Name)
+			deletePodSyncByName(ctx2, f, cleanupPod.Name)
+			waitForContainerRemoval(ctx2, cleanupPod.Spec.Containers[0].Name, cleanupPod.Name, cleanupPod.Namespace)
+			framework.Logf("deleted pod: %s/%s", cleanupPod.Namespace, cleanupPod.Name)
+		}
+	})
+
+	cfsCheckCommand := []string{"sh", "-c", "cat /sys/fs/cgroup/cpu.max && sleep 1d"}
+	defaultPeriod := "100000"
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "gu-container-cfsquota-disabled",
+			cpuRequest: "1",
+			cpuLimit:   "1",
+		},
+	}
+	pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs)
+	pod1.Spec.Containers[0].Command = cfsCheckCommand
+	pod1 = e2epod.NewPodClient(f).CreateSync(ctx, pod1)
+	cleanupPods = append(cleanupPods, pod1)
+
+	ginkgo.By("checking if the expected cfs quota was assigned (GU pod, exclusive CPUs, unlimited)")
+
+	expectedQuota := "100000"
+	if disabledCPUQuotaWithExclusiveCPUs {
+		expectedQuota = "max"
+	}
+	expCFSQuotaRegex := fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod1.Name, pod1.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+		pod1.Spec.Containers[0].Name, pod1.Name)
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "gu-container-cfsquota-enabled",
+			cpuRequest: "500m",
+			cpuLimit:   "500m",
+		},
+	}
+	pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
+	pod2.Spec.Containers[0].Command = cfsCheckCommand
+	pod2 = e2epod.NewPodClient(f).CreateSync(ctx, pod2)
+	cleanupPods = append(cleanupPods, pod2)
+
+	ginkgo.By("checking if the expected cfs quota was assigned (GU pod, limited)")
+
+	expectedQuota = "50000"
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+		pod2.Spec.Containers[0].Name, pod2.Name)
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "non-gu-container",
+			cpuRequest: "100m",
+			cpuLimit:   "500m",
+		},
+	}
+	pod3 = makeCPUManagerPod("non-gu-pod3", ctnAttrs)
+	pod3.Spec.Containers[0].Command = cfsCheckCommand
+	pod3 = e2epod.NewPodClient(f).CreateSync(ctx, pod3)
+	cleanupPods = append(cleanupPods, pod3)
+
+	ginkgo.By("checking if the expected cfs quota was assigned (BU pod, limited)")
+
+	expectedQuota = "50000"
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod3.Name, pod3.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+		pod3.Spec.Containers[0].Name, pod3.Name)
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "gu-container-non-int-values",
+			cpuRequest: "500m",
+			cpuLimit:   "500m",
+		},
+		{
+			ctnName:    "gu-container-int-values",
+			cpuRequest: "1",
+			cpuLimit:   "1",
+		},
+	}
+	pod4 := makeCPUManagerPod("gu-pod4", ctnAttrs)
+	pod4.Spec.Containers[0].Command = cfsCheckCommand
+	pod4.Spec.Containers[1].Command = cfsCheckCommand
+	pod4 = e2epod.NewPodClient(f).CreateSync(ctx, pod4)
+	cleanupPods = append(cleanupPods, pod4)
+
+	ginkgo.By("checking if the expected cfs quota was assigned (GU pod, container 0 limited, container 1 exclusive CPUs unlimited)")
+
+	expectedQuota = "50000"
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod4.Name, pod4.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+		pod4.Spec.Containers[0].Name, pod4.Name)
+	expectedQuota = "100000"
+	if disabledCPUQuotaWithExclusiveCPUs {
+		expectedQuota = "max"
+	}
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod4.Name, pod4.Spec.Containers[1].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+		pod4.Spec.Containers[1].Name, pod4.Name)
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "gu-container-non-int-values",
+			cpuRequest: "500m",
+			cpuLimit:   "500m",
+		},
+		{
+			ctnName:    "gu-container-int-values",
+			cpuRequest: "1",
+			cpuLimit:   "1",
+		},
+	}
+
+	podCFSCheckCommand := []string{"sh", "-c", `cat $(find /sysfscgroup | grep "$(cat /podinfo/uid | sed 's/-/_/g').slice/cpu.max$") && sleep 1d`}
+
+	pod5 := makeCPUManagerPod("gu-pod5", ctnAttrs)
+	pod5.Spec.Containers[0].Command = podCFSCheckCommand
+	pod5 = e2epod.NewPodClient(f).CreateSync(ctx, pod5)
+	cleanupPods = append(cleanupPods, pod5)
+	ginkgo.By("checking if the expected cfs quota was assigned to pod (GU pod, unlimited)")
+
+	expectedQuota = "150000"
+
+	if disabledCPUQuotaWithExclusiveCPUs {
+		expectedQuota = "max"
+	}
+
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod5.Name, pod5.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod5.Spec.Containers[0].Name, pod5.Name)
+
+	ctnAttrs = []ctnAttribute{
+		{
+			ctnName:    "gu-container",
+			cpuRequest: "100m",
+			cpuLimit:   "100m",
+		},
+	}
+
+	pod6 := makeCPUManagerPod("gu-pod6", ctnAttrs)
+	pod6.Spec.Containers[0].Command = podCFSCheckCommand
+	pod6 = e2epod.NewPodClient(f).CreateSync(ctx, pod6)
+	cleanupPods = append(cleanupPods, pod6)
+
+	ginkgo.By("checking if the expected cfs quota was assigned to pod (GU pod, limited)")
+
+	expectedQuota = "10000"
+	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
+	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod6.Name, pod6.Spec.Containers[0].Name, expCFSQuotaRegex)
+	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod6.Spec.Containers[0].Name, pod6.Name)
+}
+
 func runMultipleGuPods(ctx context.Context, f *framework.Framework) {
 	var expAllowedCPUsListRegex string
 	var cpuList []int
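The expected values asserted above follow directly from the CFS arithmetic quota = milliCPU * period / 1000 with the default 100ms period; a quick check (illustrative only):

package main

import "fmt"

// quota reproduces the arithmetic behind the test expectations.
func quota(milliCPU, period int64) int64 { return milliCPU * period / 1000 }

func main() {
	const period = 100000                // default CFS period, in microseconds
	fmt.Println(quota(1000, period))     // 100000: 1-CPU container, becomes "max" when the gate is on
	fmt.Println(quota(500, period))      // 50000: 500m container, always enforced
	fmt.Println(quota(500+1000, period)) // 150000: pod-level sum for gu-pod5, "max" when the gate is on
	fmt.Println(quota(100, period))      // 10000: gu-pod6's 100m limit, enforced either way
}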
@@ -709,6 +917,37 @@ func runCPUManagerTests(f *framework.Framework) {
 		runSMTAlignmentPositiveTests(ctx, f, smtLevel)
 	})
 
+	ginkgo.It("should not enforce CFS quota for containers with static CPUs assigned", func(ctx context.Context) {
+		if !IsCgroup2UnifiedMode() {
+			e2eskipper.Skipf("Skipping since CgroupV2 not used")
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg,
+			&cpuManagerKubeletArguments{
+				policyName:                       string(cpumanager.PolicyStatic),
+				reservedSystemCPUs:               cpuset.New(0),
+				disableCPUQuotaWithExclusiveCPUs: true,
+			},
+		)
+		updateKubeletConfig(ctx, f, newCfg, true)
+		runCfsQuotaGuPods(ctx, f, true)
+	})
+
+	ginkgo.It("should keep enforcing the CFS quota for containers with static CPUs assigned and feature gate disabled", func(ctx context.Context) {
+		if !IsCgroup2UnifiedMode() {
+			e2eskipper.Skipf("Skipping since CgroupV2 not used")
+		}
+		newCfg := configureCPUManagerInKubelet(oldCfg,
+			&cpuManagerKubeletArguments{
+				policyName:                       string(cpumanager.PolicyStatic),
+				reservedSystemCPUs:               cpuset.New(0),
+				disableCPUQuotaWithExclusiveCPUs: false,
+			},
+		)
+
+		updateKubeletConfig(ctx, f, newCfg, true)
+		runCfsQuotaGuPods(ctx, f, false)
+	})
+
 	f.It("should not reuse CPUs of restartable init containers", feature.SidecarContainers, func(ctx context.Context) {
 		cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
 
@@ -420,6 +420,12 @@
     lockToDefault: true
     preRelease: GA
     version: "1.31"
+- name: DisableCPUQuotaWithExclusiveCPUs
+  versionedSpecs:
+  - default: true
+    lockToDefault: false
+    preRelease: Beta
+    version: "1.33"
 - name: DisableKubeletCloudCredentialProviders
   versionedSpecs:
   - default: false