Address review comments

Signed-off-by: Swati Sehgal <swsehgal@redhat.com>
Author: Swati Sehgal
Date:   2025-10-31 17:30:48 +00:00
parent f2257059d7
commit f2d075ea65
7 changed files with 23 additions and 31 deletions
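
The same mechanical migration runs through all seven files: NewCgroupManager now takes a klog.Logger directly instead of deriving one from a context.Context, so each call site passes in a logger it already holds. A minimal sketch of the call-site change, based on the hunks below (variable names are illustrative):

    // Before: the factory pulled the logger out of the context itself.
    cgroupManager := NewCgroupManager(ctx, subsystems, cgroupDriver)

    // After: the caller resolves the logger once and passes it explicitly.
    logger := klog.FromContext(ctx)
    cgroupManager := NewCgroupManager(logger, subsystems, cgroupDriver)

Unit tests obtain the logger from ktesting.NewTestContext(t), and the e2e helper falls back to klog.Background() where no per-test logger is in scope.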

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package cm
 import (
-"context"
 "fmt"
 "os"
 "path"
@@ -153,8 +152,7 @@ var _ CgroupManager = &cgroupV1impl{}
 var _ CgroupManager = &cgroupV2impl{}
 // NewCgroupManager is a factory method that returns a CgroupManager
-func NewCgroupManager(ctx context.Context, cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
-logger := klog.FromContext(ctx)
+func NewCgroupManager(logger klog.Logger, cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
 if libcontainercgroups.IsCgroup2UnifiedMode() {
 return NewCgroupV2Manager(logger, cs, cgroupDriver)
 }

View File

@@ -253,7 +253,7 @@ func NewContainerManager(ctx context.Context, mountUtil mount.Interface, cadviso
 // Turn CgroupRoot from a string (in cgroupfs path format) to internal CgroupName
 cgroupRoot := ParseCgroupfsToCgroupName(nodeConfig.CgroupRoot)
-cgroupManager := NewCgroupManager(ctx, subsystems, nodeConfig.CgroupDriver)
+cgroupManager := NewCgroupManager(logger, subsystems, nodeConfig.CgroupDriver)
 nodeConfig.CgroupVersion = cgroupManager.Version()
 // Check if Cgroup-root actually exists on the node
 if nodeConfig.CgroupsPerQOS {

View File

@@ -258,7 +258,7 @@ func TestGetCapacity(t *testing.T) {
 }
 func TestNewPodContainerManager(t *testing.T) {
-_, tCtx := ktesting.NewTestContext(t)
+logger, _ := ktesting.NewTestContext(t)
 info := QOSContainersInfo{
 Guaranteed: CgroupName{"guaranteed"},
 BestEffort: CgroupName{"besteffort"},
@@ -280,7 +280,7 @@ func TestNewPodContainerManager(t *testing.T) {
 cm: &containerManagerImpl{
 qosContainerManager: &qosContainerManagerImpl{
 qosContainersInfo: info,
-cgroupManager: NewCgroupManager(tCtx, &CgroupSubsystems{}, ""),
+cgroupManager: NewCgroupManager(logger, &CgroupSubsystems{}, ""),
 },
 NodeConfig: QosDisabled,
@@ -291,7 +291,7 @@ func TestNewPodContainerManager(t *testing.T) {
 cm: &containerManagerImpl{
 qosContainerManager: &qosContainerManagerImpl{
 qosContainersInfo: info,
-cgroupManager: NewCgroupManager(tCtx, &CgroupSubsystems{}, ""),
+cgroupManager: NewCgroupManager(logger, &CgroupSubsystems{}, ""),
 },
 NodeConfig: QosEnabled,
@@ -302,7 +302,7 @@ func TestNewPodContainerManager(t *testing.T) {
 cm: &containerManagerImpl{
 qosContainerManager: &qosContainerManagerImpl{
 qosContainersInfo: info,
-cgroupManager: NewCgroupManager(tCtx, &CgroupSubsystems{}, "systemd"),
+cgroupManager: NewCgroupManager(logger, &CgroupSubsystems{}, "systemd"),
 },
 NodeConfig: QosEnabled,
@@ -313,7 +313,7 @@ func TestNewPodContainerManager(t *testing.T) {
 cm: &containerManagerImpl{
 qosContainerManager: &qosContainerManagerImpl{
 qosContainersInfo: info,
-cgroupManager: NewCgroupManager(tCtx, &CgroupSubsystems{}, "systemd"),
+cgroupManager: NewCgroupManager(logger, &CgroupSubsystems{}, "systemd"),
 },
 NodeConfig: QosDisabled,

View File

@@ -37,13 +37,7 @@ var _ func() (*CgroupSubsystems, error) = GetCgroupSubsystems
 var _ func(string) ([]int, error) = getCgroupProcs
 var _ func(types.UID) string = GetPodCgroupNameSuffix
 var _ func(string, bool, string) string = NodeAllocatableRoot
-// We cannot simply update the function signature of GetKubeletContainer here because that function may be called from other places in the codebase, and its actual implementation must match all real usages.
-// Changing its signature without updating all call sites and the implementation would introduce compilation errors.
-// This type assertion is only to check at compile time that the signature matches—if the signature has changed elsewhere, this line should be updated or removed,
-// but updating it here is not a "fix" unless the entire function and its usages are also changed accordingly.
-// Therefore, the safest approach is to remove or comment out the assertion if there's a signature mismatch, rather than updating the signature here.
-// var _ func(klog.Logger, string) (string, error) = GetKubeletContainer
 var _ func(klog.Logger, string) (string, error) = GetKubeletContainer
 // hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds.
 func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList {
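
The assertion that survives, var _ func(klog.Logger, string) (string, error) = GetKubeletContainer, is a standard compile-time signature check: assigning the function to the blank identifier with an explicit function type makes the build break if the signature drifts, at zero runtime cost. A small illustration of the idiom, using a made-up Lookup function rather than anything from this file:

    // Compilation fails if Lookup's signature ever diverges from the asserted type.
    var _ func(klog.Logger, string) (string, error) = Lookup

    func Lookup(logger klog.Logger, name string) (string, error) {
        logger.Info("looking up", "name", name)
        return name, nil
    }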

View File

@@ -33,7 +33,7 @@ import (
 )
 func TestIsCgroupPod(t *testing.T) {
-_, tCtx := ktesting.NewTestContext(t)
+logger, _ := ktesting.NewTestContext(t)
 qosContainersInfo := QOSContainersInfo{
 Guaranteed: RootCgroupName,
 Burstable: NewCgroupName(RootCgroupName, strings.ToLower(string(v1.PodQOSBurstable))),
@@ -114,7 +114,7 @@ func TestIsCgroupPod(t *testing.T) {
 }
 for _, cgroupDriver := range []string{"cgroupfs", "systemd"} {
 pcm := &podContainerManagerImpl{
-cgroupManager: NewCgroupManager(tCtx, nil, cgroupDriver),
+cgroupManager: NewCgroupManager(logger, nil, cgroupDriver),
 enforceCPULimits: true,
 qosContainersInfo: qosContainersInfo,
 }
@@ -140,7 +140,7 @@ func TestIsCgroupPod(t *testing.T) {
 }
 func TestGetPodContainerName(t *testing.T) {
-_, tCtx := ktesting.NewTestContext(t)
+logger, _ := ktesting.NewTestContext(t)
 newGuaranteedPodWithUID := func(uid types.UID) *v1.Pod {
 return &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -223,7 +223,7 @@ func TestGetPodContainerName(t *testing.T) {
 {
 name: "pod with qos guaranteed and cgroupfs",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "cgroupfs"),
+cgroupManager: NewCgroupManager(logger, nil, "cgroupfs"),
 },
 args: args{
 pod: newGuaranteedPodWithUID("fake-uid-1"),
@@ -233,7 +233,7 @@ func TestGetPodContainerName(t *testing.T) {
 }, {
 name: "pod with qos guaranteed and systemd",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "systemd"),
+cgroupManager: NewCgroupManager(logger, nil, "systemd"),
 },
 args: args{
 pod: newGuaranteedPodWithUID("fake-uid-2"),
@@ -243,7 +243,7 @@ func TestGetPodContainerName(t *testing.T) {
 }, {
 name: "pod with qos burstable and cgroupfs",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "cgroupfs"),
+cgroupManager: NewCgroupManager(logger, nil, "cgroupfs"),
 },
 args: args{
 pod: newBurstablePodWithUID("fake-uid-3"),
@@ -253,7 +253,7 @@ func TestGetPodContainerName(t *testing.T) {
 }, {
 name: "pod with qos burstable and systemd",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "systemd"),
+cgroupManager: NewCgroupManager(logger, nil, "systemd"),
 },
 args: args{
 pod: newBurstablePodWithUID("fake-uid-4"),
@@ -263,7 +263,7 @@ func TestGetPodContainerName(t *testing.T) {
 }, {
 name: "pod with qos best-effort and cgroupfs",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "cgroupfs"),
+cgroupManager: NewCgroupManager(logger, nil, "cgroupfs"),
 },
 args: args{
 pod: newBestEffortPodWithUID("fake-uid-5"),
@@ -273,7 +273,7 @@ func TestGetPodContainerName(t *testing.T) {
 }, {
 name: "pod with qos best-effort and systemd",
 fields: fields{
-cgroupManager: NewCgroupManager(tCtx, nil, "systemd"),
+cgroupManager: NewCgroupManager(logger, nil, "systemd"),
 },
 args: args{
 pod: newBestEffortPodWithUID("fake-uid-6"),

View File

@@ -20,7 +20,6 @@ limitations under the License.
 package cm
 import (
-"context"
 "fmt"
 "strconv"
 "testing"
@@ -29,6 +28,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/klog/v2"
 "k8s.io/klog/v2/ktesting"
 )
@@ -108,7 +108,7 @@ func activeTestPods() []*v1.Pod {
 }
 }
-func createTestQOSContainerManager(tCtx context.Context) (*qosContainerManagerImpl, error) {
+func createTestQOSContainerManager(logger klog.Logger) (*qosContainerManagerImpl, error) {
 subsystems, err := GetCgroupSubsystems()
 if err != nil {
 return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err)
@@ -119,7 +119,7 @@ func createTestQOSContainerManager(tCtx context.Context) (*qosContainerManagerIm
 qosContainerManager := &qosContainerManagerImpl{
 subsystems: subsystems,
-cgroupManager: NewCgroupManager(tCtx, subsystems, "cgroupfs"),
+cgroupManager: NewCgroupManager(logger, subsystems, "cgroupfs"),
 cgroupRoot: cgroupRoot,
 qosReserved: nil,
 }
@@ -130,8 +130,8 @@ func createTestQOSContainerManager(tCtx context.Context) (*qosContainerManagerIm
 }
 func TestQoSContainerCgroup(t *testing.T) {
-_, tCtx := ktesting.NewTestContext(t)
-m, err := createTestQOSContainerManager(tCtx)
+logger, _ := ktesting.NewTestContext(t)
+m, err := createTestQOSContainerManager(logger)
 assert.NoError(t, err)
 qosConfigs := map[v1.PodQOSClass]*CgroupConfig{

View File

@@ -234,7 +234,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
 }
 // Create a cgroup manager object for manipulating cgroups.
-cgroupManager := cm.NewCgroupManager(context.Background(), subsystems, oldCfg.CgroupDriver)
+cgroupManager := cm.NewCgroupManager(klog.Background(), subsystems, oldCfg.CgroupDriver)
 ginkgo.DeferCleanup(destroyTemporaryCgroupsForReservation, cgroupManager)
 ginkgo.DeferCleanup(func(ctx context.Context) {
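
klog.Background() in this last hunk supplies the process-wide default logger, the usual fallback when no test- or request-scoped logger is available; the unit-test hunks above instead take their logger from ktesting.NewTestContext(t), which routes log output through the testing framework.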